-rw-r--r--Documentation/ABI/stable/sysfs-class-tpm4
-rw-r--r--Documentation/ABI/stable/sysfs-firmware-opal-elog2
-rw-r--r--Documentation/ABI/testing/sysfs-class-power58
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ppi19
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs12
-rw-r--r--Documentation/Changes1
-rw-r--r--Documentation/DMA-API-HOWTO.txt5
-rw-r--r--Documentation/DMA-API.txt6
-rw-r--r--Documentation/DocBook/.gitignore2
-rw-r--r--Documentation/DocBook/Makefile6
-rw-r--r--Documentation/DocBook/device-drivers.tmpl81
-rw-r--r--Documentation/DocBook/media/dvb/dvbapi.xml3
-rw-r--r--Documentation/DocBook/media/dvb/kdapi.xml2309
-rw-r--r--Documentation/DocBook/media/v4l/biblio.xml18
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml20
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml14
-rw-r--r--Documentation/DocBook/media/v4l/dev-sdr.xml32
-rw-r--r--Documentation/DocBook/media/v4l/io.xml10
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml111
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml13
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml7
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-fmt.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-modulator.xml14
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-tuner.xml16
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querycap.xml6
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-queryctrl.xml21
-rw-r--r--Documentation/DocBook/media_api.tmpl2
-rw-r--r--Documentation/SubmittingPatches8
-rw-r--r--Documentation/blockdev/zram.txt44
-rw-r--r--Documentation/cgroups/blkio-controller.txt2
-rw-r--r--Documentation/cgroups/cgroups.txt4
-rw-r--r--Documentation/cgroups/freezer-subsystem.txt2
-rw-r--r--Documentation/cgroups/unified-hierarchy.txt27
-rw-r--r--Documentation/crypto/asymmetric-keys.txt27
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt22
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt21
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/at91-clock.txt35
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt45
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt78
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt69
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si514.txt24
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt1
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt19
-rw-r--r--Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt27
-rw-r--r--Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt3
-rw-r--r--Documentation/devicetree/bindings/mfd/axp20x.txt4
-rw-r--r--Documentation/devicetree/bindings/power/bq24257.txt53
-rw-r--r--Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt34
-rw-r--r--Documentation/devicetree/bindings/power_supply/qcom_smbb.txt131
-rw-r--r--Documentation/devicetree/bindings/power_supply/tps65217_charger.txt12
-rw-r--r--Documentation/devicetree/bindings/regulator/act8865-regulator.txt3
-rw-r--r--Documentation/devicetree/bindings/regulator/anatop-regulator.txt1
-rw-r--r--Documentation/devicetree/bindings/regulator/arizona-regulator.txt17
-rw-r--r--Documentation/devicetree/bindings/regulator/max77802.txt25
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt1
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65023.txt60
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,bcm2835-aux-spi.txt38
-rw-r--r--Documentation/devicetree/bindings/spi/spi-mt65xx.txt9
-rw-r--r--Documentation/email-clients.txt2
-rw-r--r--Documentation/filesystems/f2fs.txt3
-rw-r--r--Documentation/filesystems/path-lookup.md1297
-rw-r--r--Documentation/filesystems/path-lookup.txt2
-rw-r--r--Documentation/filesystems/sysfs-tagging.txt14
-rw-r--r--Documentation/filesystems/sysfs.txt9
-rw-r--r--Documentation/gpio/board.txt10
-rw-r--r--Documentation/gpio/sysfs.txt6
-rw-r--r--Documentation/kernel-docs.txt14
-rw-r--r--Documentation/kernel-parameters.txt18
-rw-r--r--Documentation/kselftest.txt16
-rw-r--r--Documentation/misc-devices/apds990x.txt2
-rw-r--r--Documentation/misc-devices/isl290032
-rw-r--r--Documentation/misc-devices/max68752
-rw-r--r--Documentation/networking/can.txt97
-rw-r--r--Documentation/rbtree.txt2
-rw-r--r--Documentation/security/Smack.txt10
-rw-r--r--Documentation/security/keys.txt41
-rw-r--r--Documentation/video4linux/CARDLIST.saa71341
-rw-r--r--Documentation/video4linux/v4l2-pci-skeleton.c4
-rw-r--r--Documentation/virtual/kvm/api.txt56
-rw-r--r--Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt187
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt18
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt2
-rw-r--r--Documentation/virtual/kvm/locking.txt12
-rw-r--r--Documentation/virtual/kvm/ppc-pv.txt2
-rw-r--r--Documentation/zh_CN/filesystems/sysfs.txt2
-rw-r--r--MAINTAINERS44
-rw-r--r--Makefile12
-rw-r--r--README2
-rw-r--r--arch/arm/boot/dts/twl4030.dtsi2
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/asm/kvm_arm.h20
-rw-r--r--arch/arm/include/asm/kvm_host.h5
-rw-r--r--arch/arm/kvm/Kconfig2
-rw-r--r--arch/arm/kvm/arm.c76
-rw-r--r--arch/arm/kvm/psci.c10
-rw-r--r--arch/arm/kvm/trace.h10
-rw-r--r--arch/arm/mach-at91/Kconfig3
-rw-r--r--arch/arm/mach-bcm/Kconfig2
-rw-r--r--arch/arm/mach-u300/dummyspichip.c1
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/dma-mapping.h15
-rw-r--r--arch/arm64/include/asm/kvm_arm.h16
-rw-r--r--arch/arm64/include/asm/kvm_host.h5
-rw-r--r--arch/arm64/kvm/Kconfig2
-rw-r--r--arch/arm64/kvm/hyp.S8
-rw-r--r--arch/arm64/mm/dma-mapping.c457
-rw-r--r--arch/mips/bcm63xx/dev-spi.c42
-rw-r--r--arch/mips/include/asm/kvm_host.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h44
-rw-r--r--arch/mips/txx9/generic/spi_eeprom.c1
-rw-r--r--arch/powerpc/include/asm/disassemble.h5
-rw-r--r--arch/powerpc/include/asm/kvm_host.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h6
-rw-r--r--arch/powerpc/kernel/prom_init.c40
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S29
-rw-r--r--arch/powerpc/kvm/e500.c3
-rw-r--r--arch/powerpc/kvm/e500_emulate.c19
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/pci.h4
-rw-r--r--arch/s390/include/asm/pci_dma.h5
-rw-r--r--arch/s390/kvm/intercept.c42
-rw-r--r--arch/s390/kvm/interrupt.c116
-rw-r--r--arch/s390/kvm/kvm-s390.c58
-rw-r--r--arch/s390/kvm/kvm-s390.h35
-rw-r--r--arch/s390/kvm/priv.c19
-rw-r--r--arch/s390/pci/pci_dma.c37
-rw-r--r--arch/sparc/include/asm/topology_64.h3
-rw-r--r--arch/sparc/include/uapi/asm/asi.h2
-rw-r--r--arch/sparc/kernel/iommu.c12
-rw-r--r--arch/sparc/kernel/ldc.c2
-rw-r--r--arch/sparc/kernel/pci_sun4v.c18
-rw-r--r--arch/sparc/kernel/unaligned_64.c22
-rw-r--r--arch/sparc/lib/VISsave.S10
-rw-r--r--arch/sparc/mm/init_64.c70
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/x86/include/asm/irq_remapping.h10
-rw-r--r--arch/x86/include/asm/kvm_emulate.h10
-rw-r--r--arch/x86/include/asm/kvm_host.h38
-rw-r--r--arch/x86/include/asm/vmx.h3
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h18
-rw-r--r--arch/x86/include/uapi/asm/vmx.h4
-rw-r--r--arch/x86/kernel/kvmclock.c46
-rw-r--r--arch/x86/kvm/Kconfig2
-rw-r--r--arch/x86/kvm/assigned-dev.c62
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/cpuid.h37
-rw-r--r--arch/x86/kvm/emulate.c35
-rw-r--r--arch/x86/kvm/hyperv.c31
-rw-r--r--arch/x86/kvm/i8254.c4
-rw-r--r--arch/x86/kvm/ioapic.c29
-rw-r--r--arch/x86/kvm/ioapic.h15
-rw-r--r--arch/x86/kvm/irq.c40
-rw-r--r--arch/x86/kvm/irq.h27
-rw-r--r--arch/x86/kvm/irq_comm.c129
-rw-r--r--arch/x86/kvm/lapic.c127
-rw-r--r--arch/x86/kvm/lapic.h7
-rw-r--r--arch/x86/kvm/mmu.c91
-rw-r--r--arch/x86/kvm/paging_tmpl.h19
-rw-r--r--arch/x86/kvm/svm.c43
-rw-r--r--arch/x86/kvm/trace.h51
-rw-r--r--arch/x86/kvm/vmx.c750
-rw-r--r--arch/x86/kvm/x86.c256
-rw-r--r--block/blk-cgroup.c1
-rw-r--r--block/blk-throttle.c2
-rw-r--r--block/cfq-iosched.c4
-rw-r--r--certs/.gitignore4
-rw-r--r--crypto/asymmetric_keys/asymmetric_keys.h5
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c44
-rw-r--r--crypto/asymmetric_keys/public_key.c4
-rw-r--r--crypto/asymmetric_keys/signature.c2
-rw-r--r--crypto/asymmetric_keys/x509_parser.h1
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c9
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c12
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/ahci_qoriq.c279
-rw-r--r--drivers/ata/libahci.c30
-rw-r--r--drivers/ata/libata-scsi.c59
-rw-r--r--drivers/ata/pata_it821x.c6
-rw-r--r--drivers/ata/pata_macio.c1
-rw-r--r--drivers/ata/pata_pxa.c171
-rw-r--r--drivers/ata/pata_samsung_cf.c2
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/char/tpm/st33zp24/Kconfig2
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c1
-rw-r--r--drivers/char/tpm/st33zp24/spi.c1
-rw-r--r--drivers/char/tpm/tpm-chip.c24
-rw-r--r--drivers/char/tpm/tpm-interface.c76
-rw-r--r--drivers/char/tpm/tpm.h134
-rw-r--r--drivers/char/tpm/tpm2-cmd.c250
-rw-r--r--drivers/char/tpm/tpm_crb.c39
-rw-r--r--drivers/char/tpm/tpm_eventlog.c78
-rw-r--r--drivers/char/tpm/tpm_eventlog.h6
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c1
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c1
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c1
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c2
-rw-r--r--drivers/char/tpm/tpm_of.c6
-rw-r--r--drivers/char/tpm/tpm_ppi.c34
-rw-r--r--drivers/char/tpm/tpm_tis.c192
-rw-r--r--drivers/clk/Kconfig19
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/at91/Makefile1
-rw-r--r--drivers/clk/at91/clk-generated.c306
-rw-r--r--drivers/clk/at91/clk-peripheral.c24
-rw-r--r--drivers/clk/at91/clk-system.c3
-rw-r--r--drivers/clk/at91/clk-utmi.c4
-rw-r--r--drivers/clk/at91/pmc.c21
-rw-r--r--drivers/clk/at91/pmc.h3
-rw-r--r--drivers/clk/bcm/Kconfig6
-rw-r--r--drivers/clk/bcm/Makefile4
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1575
-rw-r--r--drivers/clk/bcm/clk-cygnus.c155
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c196
-rw-r--r--drivers/clk/bcm/clk-iproc.h22
-rw-r--r--drivers/clk/bcm/clk-ns2.c288
-rw-r--r--drivers/clk/bcm/clk-nsp.c139
-rw-r--r--drivers/clk/berlin/bg2.c4
-rw-r--r--drivers/clk/berlin/bg2q.c2
-rw-r--r--drivers/clk/clk-bcm2835.c55
-rw-r--r--drivers/clk/clk-divider.c20
-rw-r--r--drivers/clk/clk-fractional-divider.c53
-rw-r--r--drivers/clk/clk-max77802.c2
-rw-r--r--drivers/clk/clk-multiplier.c130
-rw-r--r--drivers/clk/clk-si514.c379
-rw-r--r--drivers/clk/clk-si5351.c17
-rw-r--r--drivers/clk/clk-xgene.c1
-rw-r--r--drivers/clk/clk.c38
-rw-r--r--drivers/clk/hisilicon/clk-hi6220-stub.c2
-rw-r--r--drivers/clk/imx/clk-imx25.c12
-rw-r--r--drivers/clk/imx/clk-imx27.c17
-rw-r--r--drivers/clk/imx/clk-imx31.c39
-rw-r--r--drivers/clk/imx/clk-imx35.c59
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c16
-rw-r--r--drivers/clk/imx/clk-imx6q.c12
-rw-r--r--drivers/clk/imx/clk-imx6sl.c12
-rw-r--r--drivers/clk/imx/clk-imx6sx.c9
-rw-r--r--drivers/clk/imx/clk-imx6ul.c18
-rw-r--r--drivers/clk/imx/clk-imx7d.c14
-rw-r--r--drivers/clk/imx/clk-pllv2.c12
-rw-r--r--drivers/clk/imx/clk-vf610.c1
-rw-r--r--drivers/clk/imx/clk.c38
-rw-r--r--drivers/clk/imx/clk.h1
-rw-r--r--drivers/clk/keystone/pll.c2
-rw-r--r--drivers/clk/mediatek/Makefile2
-rw-r--r--drivers/clk/mediatek/clk-apmixed.c107
-rw-r--r--drivers/clk/mediatek/clk-gate.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c347
-rw-r--r--drivers/clk/mediatek/clk-mtk.c36
-rw-r--r--drivers/clk/mediatek/clk-mtk.h24
-rw-r--r--drivers/clk/mediatek/clk-pll.c7
-rw-r--r--drivers/clk/mvebu/clk-cpu.c4
-rw-r--r--drivers/clk/mvebu/common.c2
-rw-r--r--drivers/clk/mxs/clk-frac.c12
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c17
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c42
-rw-r--r--drivers/clk/qcom/Kconfig9
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-rcg.c230
-rw-r--r--drivers/clk/qcom/clk-rcg.h8
-rw-r--r--drivers/clk/qcom/clk-rcg2.c170
-rw-r--r--drivers/clk/qcom/common.c42
-rw-r--r--drivers/clk/qcom/common.h4
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c49
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c7
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c7
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c569
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c20
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c22
-rw-r--r--drivers/clk/qcom/gdsc.c237
-rw-r--r--drivers/clk/qcom/gdsc.h68
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c7
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c7
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c111
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c411
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c92
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c11
-rw-r--r--drivers/clk/rockchip/clk-pll.c135
-rw-r--r--drivers/clk/rockchip/clk.c6
-rw-r--r--drivers/clk/samsung/clk-exynos7.c467
-rw-r--r--drivers/clk/shmobile/clk-mstp.c2
-rw-r--r--drivers/clk/shmobile/clk-r8a7778.c8
-rw-r--r--drivers/clk/sirf/clk-atlas7.c358
-rw-r--r--drivers/clk/st/clk-flexgen.c7
-rw-r--r--drivers/clk/st/clkgen-mux.c3
-rw-r--r--drivers/clk/st/clkgen-pll.c469
-rw-r--r--drivers/clk/st/clkgen.h2
-rw-r--r--drivers/clk/sunxi/Makefile3
-rw-r--r--drivers/clk/sunxi/clk-a10-codec.c44
-rw-r--r--drivers/clk/sunxi/clk-a10-mod1.c81
-rw-r--r--drivers/clk/sunxi/clk-a10-pll2.c216
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c2
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0.c1
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c1
-rw-r--r--drivers/clk/tegra/clk-dfll.c114
-rw-r--r--drivers/clk/tegra/clk-emc.c4
-rw-r--r--drivers/clk/tegra/clk-tegra-audio.c25
-rw-r--r--drivers/clk/tegra/clk-tegra114.c8
-rw-r--r--drivers/clk/tegra/clk-tegra124.c8
-rw-r--r--drivers/clk/tegra/clk-tegra30.c8
-rw-r--r--drivers/clk/tegra/clk.h106
-rw-r--r--drivers/clk/tegra/cvb.c7
-rw-r--r--drivers/clk/versatile/Kconfig2
-rw-r--r--drivers/clk/versatile/clk-icst.c4
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/gpio/gpio-74x164.c1
-rw-r--r--drivers/gpio/gpio-max7301.c1
-rw-r--r--drivers/gpio/gpio-mc33880.c1
-rw-r--r--drivers/gpio/gpio-mcp23s08.c2
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c1
-rw-r--r--drivers/hsi/clients/ssi_protocol.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi.c21
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c2
-rw-r--r--drivers/hsi/hsi.c13
-rw-r--r--drivers/hv/hyperv_vmbus.h5
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/adcxx.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/adt7310.c1
-rw-r--r--drivers/hwmon/lm70.c1
-rw-r--r--drivers/hwmon/max1111.c1
-rw-r--r--drivers/iio/accel/kxsd9.c1
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/adc/ad7266.c1
-rw-r--r--drivers/iio/adc/ad7298.c1
-rw-r--r--drivers/iio/adc/ad7476.c1
-rw-r--r--drivers/iio/adc/ad7791.c1
-rw-r--r--drivers/iio/adc/ad7793.c1
-rw-r--r--drivers/iio/adc/ad7887.c1
-rw-r--r--drivers/iio/adc/ad7923.c1
-rw-r--r--drivers/iio/adc/max1027.c1
-rw-r--r--drivers/iio/adc/mcp320x.c1
-rw-r--r--drivers/iio/adc/ti-adc128s052.c1
-rw-r--r--drivers/iio/amplifiers/ad8366.c1
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c1
-rw-r--r--drivers/iio/dac/ad5064.c1
-rw-r--r--drivers/iio/dac/ad5360.c1
-rw-r--r--drivers/iio/dac/ad5380.c1
-rw-r--r--drivers/iio/dac/ad5421.c1
-rw-r--r--drivers/iio/dac/ad5446.c1
-rw-r--r--drivers/iio/dac/ad5449.c1
-rw-r--r--drivers/iio/dac/ad5504.c1
-rw-r--r--drivers/iio/dac/ad5624r_spi.c1
-rw-r--r--drivers/iio/dac/ad5686.c1
-rw-r--r--drivers/iio/dac/ad5755.c1
-rw-r--r--drivers/iio/dac/ad5764.c1
-rw-r--r--drivers/iio/dac/ad5791.c1
-rw-r--r--drivers/iio/dac/ad7303.c1
-rw-r--r--drivers/iio/dac/mcp4922.c1
-rw-r--r--drivers/iio/frequency/ad9523.c1
-rw-r--r--drivers/iio/frequency/adf4350.c1
-rw-r--r--drivers/iio/gyro/adis16080.c1
-rw-r--r--drivers/iio/gyro/adis16130.c1
-rw-r--r--drivers/iio/gyro/adis16136.c1
-rw-r--r--drivers/iio/gyro/adis16260.c1
-rw-r--r--drivers/iio/gyro/adxrs450.c1
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/imu/adis16400_core.c1
-rw-r--r--drivers/iio/imu/adis16480.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c1
-rw-r--r--drivers/iio/pressure/ms5611_spi.c1
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c1
-rw-r--r--drivers/iio/proximity/as3935.c1
-rw-r--r--drivers/input/misc/ad714x-spi.c1
-rw-r--r--drivers/input/misc/adxl34x-spi.c1
-rw-r--r--drivers/input/touchscreen/ad7877.c1
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c1
-rw-r--r--drivers/input/touchscreen/ads7846.c1
-rw-r--r--drivers/input/touchscreen/cyttsp4_spi.c1
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c1
-rw-r--r--drivers/input/touchscreen/sur40.c20
-rw-r--r--drivers/input/touchscreen/tsc2005.c1
-rw-r--r--drivers/iommu/Kconfig25
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c173
-rw-r--r--drivers/iommu/amd_iommu_init.c120
-rw-r--r--drivers/iommu/amd_iommu_types.h13
-rw-r--r--drivers/iommu/arm-smmu-v3.c155
-rw-r--r--drivers/iommu/arm-smmu.c132
-rw-r--r--drivers/iommu/dma-iommu.c524
-rw-r--r--drivers/iommu/dmar.c42
-rw-r--r--drivers/iommu/fsl_pamu_domain.c41
-rw-r--r--drivers/iommu/intel-iommu.c386
-rw-r--r--drivers/iommu/intel-svm.c602
-rw-r--r--drivers/iommu/intel_irq_remapping.c64
-rw-r--r--drivers/iommu/iommu.c46
-rw-r--r--drivers/iommu/irq_remapping.c12
-rw-r--r--drivers/iommu/omap-iommu.c58
-rw-r--r--drivers/iommu/omap-iommu.h9
-rw-r--r--drivers/iommu/s390-iommu.c337
-rw-r--r--drivers/leds/leds-dac124s085.c1
-rw-r--r--drivers/media/dvb-core/demux.h619
-rw-r--r--drivers/media/dvb-core/dmxdev.c10
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.h99
-rw-r--r--drivers/media/dvb-core/dvb_demux.c11
-rw-r--r--drivers/media/dvb-core/dvb_net.c5
-rw-r--r--drivers/media/dvb-core/dvbdev.h4
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c3
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c23
-rw-r--r--drivers/media/i2c/ml86v7667.c11
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c1
-rw-r--r--drivers/media/i2c/tvp5150.c14
-rw-r--r--drivers/media/media-entity.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c5
-rw-r--r--drivers/media/pci/cobalt/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/cobalt-cpld.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h6
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c7
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c24
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c13
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c11
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c18
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c29
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c24
-rw-r--r--drivers/media/pci/cx25821/cx25821.h3
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c15
-rw-r--r--drivers/media/pci/cx88/cx88-core.c8
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c13
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c14
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c19
-rw-r--r--drivers/media/pci/cx88/cx88-video.c21
-rw-r--r--drivers/media/pci/cx88/cx88.h2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c20
-rw-r--r--drivers/media/pci/dt3155/dt3155.h3
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c14
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb.h4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_ci.c10
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c35
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c43
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c14
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c16
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c23
-rw-r--r--drivers/media/pci/saa7134/saa7134.h5
-rw-r--r--drivers/media/pci/saa7164/Kconfig1
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c653
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c629
-rw-r--r--drivers/media/pci/saa7164/saa7164.h26
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c48
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c21
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h4
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c28
-rw-r--r--drivers/media/pci/ttpci/av7110.c9
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c6
-rw-r--r--drivers/media/pci/tw68/tw68-video.c22
-rw-r--r--drivers/media/pci/tw68/tw68.h3
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c43
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h3
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c37
-rw-r--r--drivers/media/platform/coda/coda-bit.c135
-rw-r--r--drivers/media/platform/coda/coda-common.c26
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c6
-rw-r--r--drivers/media/platform/coda/coda.h8
-rw-r--r--drivers/media/platform/coda/trace.h18
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c34
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c33
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c42
-rw-r--r--drivers/media/platform/davinci/vpif_display.h2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h4
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c25
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c33
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c18
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.h4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c25
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.h4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c23
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c3
-rw-r--r--drivers/media/platform/m2m-deinterlace.c25
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c46
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c17
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c5
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c27
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h4
-rw-r--r--drivers/media/platform/rcar_jpu.c68
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c26
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h4
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c19
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c475
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h41
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c80
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h11
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h85
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c106
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c40
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c75
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c46
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c68
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h4
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c13
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c5
-rw-r--r--drivers/media/platform/sh_veu.c22
-rw-r--r--drivers/media/platform/sh_vou.c29
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c153
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.h (renamed from include/media/atmel-isi.h)7
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c24
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c30
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c64
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c63
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c4
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c26
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Kconfig1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c4
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c44
-rw-r--r--drivers/media/platform/vim2m.c57
-rw-r--r--drivers/media/platform/vivid/Kconfig8
-rw-r--r--drivers/media/platform/vivid/vivid-core.c7
-rw-r--r--drivers/media/platform/vivid/vivid-core.h6
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c55
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c73
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c34
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c1
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c83
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.c328
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.h4
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c91
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c40
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c20
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c18
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c56
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c23
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h8
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c29
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/tuners/msi001.c1
-rw-r--r--drivers/media/usb/airspy/airspy.c26
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c10
-rw-r--r--drivers/media/usb/au0828/au0828-video.c48
-rw-r--r--drivers/media/usb/au0828/au0828.h3
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c37
-rw-r--r--drivers/media/usb/em28xx/em28xx.h3
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c29
-rw-r--r--drivers/media/usb/go7007/go7007-fw.c6
-rw-r--r--drivers/media/usb/go7007/go7007-priv.h4
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c22
-rw-r--r--drivers/media/usb/gspca/gspca.c4
-rw-r--r--drivers/media/usb/hackrf/hackrf.c1086
-rw-r--r--drivers/media/usb/msi2500/msi2500.c19
-rw-r--r--drivers/media/usb/pwc/pwc-if.c35
-rw-r--r--drivers/media/usb/pwc/pwc-uncompress.c6
-rw-r--r--drivers/media/usb/pwc/pwc.h4
-rw-r--r--drivers/media/usb/s2255/s2255drv.c29
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c17
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c12
-rw-r--r--drivers/media/usb/stk1160/stk1160.h4
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c12
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c24
-rw-r--r--drivers/media/usb/usbtv/usbtv.h3
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c3
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c29
-rw-r--r--drivers/media/usb/uvc/uvc_video.c23
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h7
-rw-r--r--drivers/media/v4l2-core/Makefile4
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c51
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c51
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c33
-rw-r--r--drivers/media/v4l2-core/v4l2-trace.c10
-rw-r--r--drivers/media/v4l2-core/vb2-trace.c9
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c2014
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-internal.h161
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c1661
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c2
-rw-r--r--drivers/mfd/arizona-spi.c1
-rw-r--r--drivers/mfd/cros_ec_spi.c1
-rw-r--r--drivers/mfd/da9052-spi.c1
-rw-r--r--drivers/mfd/ezx-pcap.c1
-rw-r--r--drivers/mfd/mc13xxx-spi.c1
-rw-r--r--drivers/mfd/stmpe-spi.c1
-rw-r--r--drivers/mfd/tps6105x.c78
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/wm831x-spi.c1
-rw-r--r--drivers/misc/ad525x_dpot-spi.c1
-rw-r--r--drivers/misc/bmp085-spi.c1
-rw-r--r--drivers/misc/eeprom/at25.c1
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/lattice-ecp3-config.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c1
-rw-r--r--drivers/misc/ti_dac7512.c1
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c1
-rw-r--r--drivers/net/can/spi/mcp251x.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c1
-rw-r--r--drivers/net/ieee802154/at86rf230.c1
-rw-r--r--drivers/net/ieee802154/cc2520.c1
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/phy/spi_ks8995.c1
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c1
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/nfc/st-nci/spi.c1
-rw-r--r--drivers/nfc/trf7970a.c1
-rw-r--r--drivers/power/88pm860x_battery.c37
-rw-r--r--drivers/power/Kconfig56
-rw-r--r--drivers/power/Makefile5
-rw-r--r--drivers/power/axp20x_usb_power.c248
-rw-r--r--drivers/power/bq2415x_charger.c2
-rw-r--r--drivers/power/bq24190_charger.c1
-rw-r--r--drivers/power/bq24257_charger.c492
-rw-r--r--drivers/power/bq27x00_battery.c1129
-rw-r--r--drivers/power/bq27xxx_battery.c1375
-rw-r--r--drivers/power/charger-manager.c4
-rw-r--r--drivers/power/lp8727_charger.c31
-rw-r--r--drivers/power/max17042_battery.c26
-rw-r--r--drivers/power/max8903_charger.c93
-rw-r--r--drivers/power/max8998_charger.c29
-rw-r--r--drivers/power/pm2301_charger.c1
-rw-r--r--drivers/power/qcom_smbb.c951
-rw-r--r--drivers/power/reset/Kconfig4
-rw-r--r--drivers/power/reset/at91-poweroff.c33
-rw-r--r--drivers/power/reset/at91-reset.c69
-rw-r--r--drivers/power/rt9455_charger.c1
-rw-r--r--drivers/power/smb347-charger.c1
-rw-r--r--drivers/power/tps65090-charger.c1
-rw-r--r--drivers/power/tps65217_charger.c264
-rw-r--r--drivers/power/twl4030_charger.c39
-rw-r--r--drivers/power/wm831x_power.c15
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/act8865-regulator.c24
-rw-r--r--drivers/regulator/anatop-regulator.c3
-rw-r--r--drivers/regulator/arizona-ldo1.c20
-rw-r--r--drivers/regulator/axp20x-regulator.c54
-rw-r--r--drivers/regulator/bcm590xx-regulator.c2
-rw-r--r--drivers/regulator/core.c512
-rw-r--r--drivers/regulator/da9052-regulator.c1
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/of_regulator.c3
-rw-r--r--drivers/regulator/pwm-regulator.c35
-rw-r--r--drivers/regulator/qcom_smd-regulator.c28
-rw-r--r--drivers/regulator/tps6105x-regulator.c16
-rw-r--r--drivers/regulator/tps65023-regulator.c282
-rw-r--r--drivers/regulator/tps6524x-regulator.c1
-rw-r--r--drivers/rtc/rtc-ds1305.c1
-rw-r--r--drivers/rtc/rtc-ds1343.c1
-rw-r--r--drivers/rtc/rtc-ds1347.c1
-rw-r--r--drivers/rtc/rtc-ds1390.c1
-rw-r--r--drivers/rtc/rtc-ds3234.c1
-rw-r--r--drivers/rtc/rtc-m41t93.c1
-rw-r--r--drivers/rtc/rtc-m41t94.c1
-rw-r--r--drivers/rtc/rtc-max6902.c1
-rw-r--r--drivers/rtc/rtc-mcp795.c1
-rw-r--r--drivers/rtc/rtc-pcf2123.c1
-rw-r--r--drivers/rtc/rtc-r9701.c1
-rw-r--r--drivers/rtc/rtc-rs5c348.c1
-rw-r--r--drivers/rtc/rtc-rx4581.c1
-rw-r--r--drivers/scsi/be2iscsi/Kconfig4
-rw-r--r--drivers/scsi/be2iscsi/Makefile2
-rw-r--r--drivers/scsi/be2iscsi/be.h4
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c4
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c8
-rw-r--r--drivers/scsi/be2iscsi/be_main.h6
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c4
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/cxlflash/common.h30
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c45
-rw-r--r--drivers/scsi/cxlflash/main.c1500
-rw-r--r--drivers/scsi/cxlflash/main.h1
-rw-r--r--drivers/scsi/cxlflash/sislite.h8
-rw-r--r--drivers/scsi/cxlflash/superpipe.c209
-rw-r--r--drivers/scsi/cxlflash/superpipe.h14
-rw-r--r--drivers/scsi/cxlflash/vlun.c68
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c46
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c16
-rw-r--r--drivers/scsi/lpfc/lpfc.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c100
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/scsi_devinfo.c181
-rw-r--r--drivers/spi/Kconfig15
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-ath79.c11
-rw-r--r--drivers/spi/spi-atmel.c24
-rw-r--r--drivers/spi/spi-au1550.c11
-rw-r--r--drivers/spi/spi-bcm2835.c14
-rw-r--r--drivers/spi/spi-bcm2835aux.c512
-rw-r--r--drivers/spi/spi-bcm53xx.c13
-rw-r--r--drivers/spi/spi-bcm63xx.c210
-rw-r--r--drivers/spi/spi-bfin-sport.c5
-rw-r--r--drivers/spi/spi-bfin5xx.c6
-rw-r--r--drivers/spi/spi-bitbang.c152
-rw-r--r--drivers/spi/spi-coldfire-qspi.c3
-rw-r--r--drivers/spi/spi-davinci.c15
-rw-r--r--drivers/spi/spi-dw-mmio.c7
-rw-r--r--drivers/spi/spi-dw-pci.c29
-rw-r--r--drivers/spi/spi-dw.c108
-rw-r--r--drivers/spi/spi-dw.h6
-rw-r--r--drivers/spi/spi-fsl-dspi.c3
-rw-r--r--drivers/spi/spi-imx.c7
-rw-r--r--drivers/spi/spi-mpc512x-psc.c8
-rw-r--r--drivers/spi/spi-mt65xx.c129
-rw-r--r--drivers/spi/spi-oc-tiny.c14
-rw-r--r--drivers/spi/spi-octeon.c2
-rw-r--r--drivers/spi/spi-omap-100k.c26
-rw-r--r--drivers/spi/spi-omap-uwire.c7
-rw-r--r--drivers/spi/spi-omap2-mcspi.c28
-rw-r--r--drivers/spi/spi-ppc4xx.c4
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c257
-rw-r--r--drivers/spi/spi-pxa2xx.h9
-rw-r--r--drivers/spi/spi-s3c24xx.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c46
-rw-r--r--drivers/spi/spi-ti-qspi.c88
-rw-r--r--drivers/spi/spi-tle62x0.c1
-rw-r--r--drivers/spi/spi-txx9.c2
-rw-r--r--drivers/spi/spi-xilinx.c38
-rw-r--r--drivers/spi/spi.c136
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/fbtft/fbtft.h1
-rw-r--r--drivers/staging/fbtft/flexfb.c1
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c1
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c1
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c1
-rw-r--r--drivers/staging/iio/adc/ad7280a.c1
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c1
-rw-r--r--drivers/staging/iio/adc/ad7780.c1
-rw-r--r--drivers/staging/iio/adc/ad7816.c1
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c1
-rw-r--r--drivers/staging/iio/frequency/ad9832.c1
-rw-r--r--drivers/staging/iio/frequency/ad9834.c1
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c2
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843_spi.c1
-rw-r--r--drivers/staging/iio/meter/ade7753.c1
-rw-r--r--drivers/staging/iio/meter/ade7754.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c1
-rw-r--r--drivers/staging/iio/meter/ade7759.c1
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c8
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c20
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c45
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h3
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c2
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c32
-rw-r--r--drivers/staging/media/omap4iss/iss.c5
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c26
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h6
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c1
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/max3100.c1
-rw-r--r--drivers/tty/serial/max310x.c1
-rw-r--r--drivers/tty/serial/sc16is7xx.c1
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c28
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h4
-rw-r--r--drivers/usb/host/max3421-hcd.c1
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/pci/Kconfig1
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c9
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h2
-rw-r--r--drivers/video/backlight/ams369fg06.c1
-rw-r--r--drivers/video/backlight/corgi_lcd.c1
-rw-r--r--drivers/video/backlight/ili922x.c1
-rw-r--r--drivers/video/backlight/l4f00242t03.c1
-rw-r--r--drivers/video/backlight/ld9040.c1
-rw-r--r--drivers/video/backlight/lms283gf05.c1
-rw-r--r--drivers/video/backlight/lms501kf03.c1
-rw-r--r--drivers/video/backlight/ltv350qv.c1
-rw-r--r--drivers/video/backlight/s6e63m0.c1
-rw-r--r--drivers/video/backlight/tdo24m.c1
-rw-r--r--drivers/video/backlight/tosa_lcd.c1
-rw-r--r--drivers/video/backlight/vgg2432a4.c1
-rw-r--r--drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c1
-rw-r--r--drivers/w1/slaves/w1_bq27000.c5
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/ceph/locks.c4
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifsacl.c25
-rw-r--r--fs/cifs/connect.c9
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/sess.c2
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/dlm/plock.c6
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h5
-rw-r--r--fs/ext4/crypto_key.c4
-rw-r--r--fs/f2fs/checkpoint.c49
-rw-r--r--fs/f2fs/crypto_key.c4
-rw-r--r--fs/f2fs/data.c176
-rw-r--r--fs/f2fs/debug.c36
-rw-r--r--fs/f2fs/dir.c19
-rw-r--r--fs/f2fs/extent_cache.c195
-rw-r--r--fs/f2fs/f2fs.h86
-rw-r--r--fs/f2fs/file.c327
-rw-r--r--fs/f2fs/gc.c77
-rw-r--r--fs/f2fs/gc.h6
-rw-r--r--fs/f2fs/inline.c42
-rw-r--r--fs/f2fs/inode.c8
-rw-r--r--fs/f2fs/namei.c19
-rw-r--r--fs/f2fs/node.c26
-rw-r--r--fs/f2fs/node.h4
-rw-r--r--fs/f2fs/recovery.c15
-rw-r--r--fs/f2fs/segment.c206
-rw-r--r--fs/f2fs/segment.h4
-rw-r--r--fs/f2fs/super.c37
-rw-r--r--fs/file.c64
-rw-r--r--fs/fscache/object-list.c4
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/gfs2/file.c8
-rw-r--r--fs/lockd/clntproc.c13
-rw-r--r--fs/locks.c106
-rw-r--r--fs/namei.c7
-rw-r--r--fs/nfs/file.c13
-rw-r--r--fs/nfs/nfs4idmap.c4
-rw-r--r--fs/nfs/nfs4proc.c13
-rw-r--r--fs/ocfs2/locks.c8
-rw-r--r--fs/pstore/Kconfig2
-rw-r--r--fs/pstore/Makefile6
-rw-r--r--fs/pstore/ftrace.c25
-rw-r--r--fs/pstore/inode.c11
-rw-r--r--fs/pstore/internal.h6
-rw-r--r--fs/pstore/platform.c47
-rw-r--r--fs/pstore/pmsg.c9
-rw-r--r--fs/pstore/ram.c19
-rw-r--r--fs/sysfs/group.c44
-rw-r--r--include/crypto/public_key.h1
-rw-r--r--include/dt-bindings/clock/at91.h1
-rw-r--r--include/dt-bindings/clock/bcm-ns2.h72
-rw-r--r--include/dt-bindings/clock/bcm-nsp.h51
-rw-r--r--include/dt-bindings/clock/bcm2835.h47
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h43
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h3
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h3
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h104
-rw-r--r--include/dt-bindings/clock/qcom,gcc-apq8084.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8916.h30
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8974.h3
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-apq8084.h10
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8974.h8
-rw-r--r--include/dt-bindings/clock/r8a7795-cpg-mssr.h63
-rw-r--r--include/dt-bindings/clock/renesas-cpg-mssr.h15
-rw-r--r--include/dt-bindings/clock/sun4i-a10-pll2.h53
-rw-r--r--include/dt-bindings/clock/vf610-clock.h3
-rw-r--r--include/keys/asymmetric-subtype.h2
-rw-r--r--include/keys/asymmetric-type.h15
-rw-r--r--include/keys/trusted-type.h14
-rw-r--r--include/keys/user-type.h8
-rw-r--r--include/kvm/arm_arch_timer.h4
-rw-r--r--include/kvm/arm_vgic.h16
-rw-r--r--include/linux/audit.h8
-rw-r--r--include/linux/backing-dev.h5
-rw-r--r--include/linux/cgroup-defs.h76
-rw-r--r--include/linux/cgroup.h129
-rw-r--r--include/linux/clk-provider.h50
-rw-r--r--include/linux/clk/at91_pmc.h22
-rw-r--r--include/linux/dma-iommu.h85
-rw-r--r--include/linux/dma_remapping.h8
-rw-r--r--include/linux/fs.h21
-rw-r--r--include/linux/hugetlb_cgroup.h4
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/init_task.h8
-rw-r--r--include/linux/intel-iommu.h139
-rw-r--r--include/linux/intel-svm.h121
-rw-r--r--include/linux/iommu-common.h1
-rw-r--r--include/linux/iommu.h8
-rw-r--r--include/linux/irqbypass.h90
-rw-r--r--include/linux/jump_label.h18
-rw-r--r--include/linux/key-type.h3
-rw-r--r--include/linux/key.h33
-rw-r--r--include/linux/kvm_host.h42
-rw-r--r--include/linux/kvm_irqfd.h71
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/memcontrol.h8
-rw-r--r--include/linux/mfd/axp20x.h24
-rw-r--r--include/linux/mfd/tps6105x.h10
-rw-r--r--include/linux/power/bq27x00_battery.h19
-rw-r--r--include/linux/power/bq27xxx_battery.h31
-rw-r--r--include/linux/power/charger-manager.h8
-rw-r--r--include/linux/pstore.h14
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/sched.h12
-rw-r--r--include/linux/spi/spi.h44
-rw-r--r--include/linux/spi/spi_bitbang.h2
-rw-r--r--include/linux/sysfs.h11
-rw-r--r--include/linux/tpm.h26
-rw-r--r--include/media/davinci/vpbe_display.h3
-rw-r--r--include/media/lirc_dev.h120
-rw-r--r--include/media/media-entity.h7
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/tuner-types.h182
-rw-r--r--include/media/tuner.h152
-rw-r--r--include/media/tveeprom.h83
-rw-r--r--include/media/v4l2-dv-timings.h34
-rw-r--r--include/media/v4l2-ioctl.h8
-rw-r--r--include/media/v4l2-mem2mem.h11
-rw-r--r--include/media/videobuf2-core.h235
-rw-r--r--include/media/videobuf2-dma-contig.h2
-rw-r--r--include/media/videobuf2-dma-sg.h2
-rw-r--r--include/media/videobuf2-dvb.h8
-rw-r--r--include/media/videobuf2-memops.h2
-rw-r--r--include/media/videobuf2-v4l2.h149
-rw-r--r--include/media/videobuf2-vmalloc.h2
-rw-r--r--include/trace/events/f2fs.h69
-rw-r--r--include/trace/events/filelock.h38
-rw-r--r--include/trace/events/v4l2.h63
-rw-r--r--include/trace/events/vb2.h65
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/v4l2-controls.h1
-rw-r--r--include/uapi/linux/videodev2.h34
-rw-r--r--kernel/.gitignore1
-rw-r--r--kernel/audit.c42
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/audit_tree.c6
-rw-r--r--kernel/auditfilter.c14
-rw-r--r--kernel/cgroup.c1297
-rw-r--r--kernel/cgroup_pids.c8
-rw-r--r--kernel/cpuset.c72
-rw-r--r--kernel/events/core.c8
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/module_signing.c1
-rw-r--r--kernel/printk/printk.c1
-rw-r--r--kernel/sched/core.c8
-rw-r--r--kernel/sched/cputime.c2
-rw-r--r--kernel/workqueue.c26
-rw-r--r--lib/digsig.c7
-rw-r--r--lib/iommu-common.c10
-rw-r--r--mm/memcontrol.c27
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/ceph/ceph_common.c2
-rw-r--r--net/ceph/crypto.c6
-rw-r--r--net/dns_resolver/dns_key.c20
-rw-r--r--net/dns_resolver/dns_query.c7
-rw-r--r--net/dns_resolver/internal.h8
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-key.c32
-rw-r--r--net/rxrpc/ar-output.c2
-rw-r--r--net/rxrpc/ar-security.c4
-rw-r--r--net/rxrpc/rxkad.c16
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/Makefile7
-rw-r--r--scripts/check-lc_ctype.c11
-rwxr-xr-xscripts/extract-module-sig.pl136
-rwxr-xr-xscripts/extract-sys-certs.pl144
-rwxr-xr-xscripts/kernel-doc144
-rw-r--r--security/apparmor/Kconfig2
-rw-r--r--security/integrity/digsig.c2
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/keys/big_key.c47
-rw-r--r--security/keys/encrypted-keys/encrypted.c18
-rw-r--r--security/keys/encrypted-keys/encrypted.h4
-rw-r--r--security/keys/encrypted-keys/masterkey_trusted.c4
-rw-r--r--security/keys/key.c20
-rw-r--r--security/keys/keyctl.c12
-rw-r--r--security/keys/keyring.c12
-rw-r--r--security/keys/process_keys.c4
-rw-r--r--security/keys/request_key.c4
-rw-r--r--security/keys/request_key_auth.c12
-rw-r--r--security/keys/trusted.c42
-rw-r--r--security/keys/trusted.h11
-rw-r--r--security/keys/user_defined.c14
-rw-r--r--security/selinux/Kconfig4
-rw-r--r--security/selinux/hooks.c27
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/selinuxfs.c26
-rw-r--r--security/selinux/ss/services.c22
-rw-r--r--security/smack/smack.h4
-rw-r--r--security/smack/smack_access.c6
-rw-r--r--security/smack/smack_lsm.c67
-rw-r--r--security/smack/smackfs.c208
-rw-r--r--sound/soc/codecs/ad1836.c1
-rw-r--r--sound/soc/codecs/ad193x-spi.c1
-rw-r--r--sound/soc/codecs/adau1761-spi.c1
-rw-r--r--sound/soc/codecs/adau1781-spi.c1
-rw-r--r--sound/soc/codecs/adau1977-spi.c1
-rw-r--r--sound/soc/codecs/adav801.c1
-rw-r--r--sound/soc/codecs/ak4104.c1
-rw-r--r--sound/soc/codecs/cs4271-spi.c1
-rw-r--r--sound/soc/codecs/da7210.c1
-rw-r--r--sound/soc/codecs/pcm1792a.c1
-rw-r--r--sound/soc/codecs/pcm512x-spi.c1
-rw-r--r--sound/soc/codecs/rt5677-spi.c1
-rw-r--r--sound/soc/codecs/ssm2602-spi.c1
-rw-r--r--sound/soc/codecs/tlv320aic23-spi.c1
-rw-r--r--sound/soc/codecs/tlv320aic26.c1
-rw-r--r--sound/soc/codecs/wm0010.c1
-rw-r--r--sound/soc/codecs/wm8510.c1
-rw-r--r--sound/soc/codecs/wm8711.c1
-rw-r--r--sound/soc/codecs/wm8728.c1
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8737.c1
-rw-r--r--sound/soc/codecs/wm8741.c1
-rw-r--r--sound/soc/codecs/wm8750.c1
-rw-r--r--sound/soc/codecs/wm8753.c1
-rw-r--r--sound/soc/codecs/wm8770.c1
-rw-r--r--sound/soc/codecs/wm8776.c1
-rw-r--r--sound/soc/codecs/wm8804-spi.c1
-rw-r--r--sound/soc/codecs/wm8900.c1
-rw-r--r--sound/soc/codecs/wm8983.c1
-rw-r--r--sound/soc/codecs/wm8985.c1
-rw-r--r--sound/soc/codecs/wm8988.c1
-rw-r--r--sound/soc/codecs/wm8995.c1
-rw-r--r--virt/Makefile1
-rw-r--r--virt/kvm/Kconfig5
-rw-r--r--virt/kvm/arm/arch_timer.c173
-rw-r--r--virt/kvm/arm/trace.h63
-rw-r--r--virt/kvm/arm/vgic-v2.c6
-rw-r--r--virt/kvm/arm/vgic-v3.c6
-rw-r--r--virt/kvm/arm/vgic.c308
-rw-r--r--virt/kvm/async_pf.c4
-rw-r--r--virt/kvm/eventfd.c190
-rw-r--r--virt/kvm/irqchip.c18
-rw-r--r--virt/kvm/kvm_main.c11
-rw-r--r--virt/lib/Kconfig2
-rw-r--r--virt/lib/Makefile1
-rw-r--r--virt/lib/irqbypass.c257
1093 files changed, 35714 insertions, 16830 deletions
diff --git a/Documentation/ABI/stable/sysfs-class-tpm b/Documentation/ABI/stable/sysfs-class-tpm
index 9f790eebb5d2..c0e23830f56a 100644
--- a/Documentation/ABI/stable/sysfs-class-tpm
+++ b/Documentation/ABI/stable/sysfs-class-tpm
@@ -116,7 +116,7 @@ Description: The "pubek" property will return the TPM's public endorsement
owner's authorization. Since the TPM driver doesn't store any
secrets, it can't authorize its own request for the pubek,
making it unaccessible. The public endorsement key is gener-
- ated at TPM menufacture time and exists for the life of the
+ ated at TPM manufacture time and exists for the life of the
chip.
Example output:
@@ -163,7 +163,7 @@ Date: April 2006
KernelVersion: 2.6.17
Contact: tpmdd-devel@lists.sf.net
Description: The "temp_deactivated" property returns a '1' if the chip has
- been temporarily dectivated, usually until the next power
+ been temporarily deactivated, usually until the next power
cycle. Whether a warm boot (reboot) will clear a TPM chip
from a temp_deactivated state is platform specific.
diff --git a/Documentation/ABI/stable/sysfs-firmware-opal-elog b/Documentation/ABI/stable/sysfs-firmware-opal-elog
index e1f3058f5954..2536434d49d0 100644
--- a/Documentation/ABI/stable/sysfs-firmware-opal-elog
+++ b/Documentation/ABI/stable/sysfs-firmware-opal-elog
@@ -57,4 +57,4 @@ Description:
Shortly after acknowledging it, the log
entry will be removed from sysfs.
Reading this file will list the supported
-	operations (curently just acknowledge).
\ No newline at end of file
+ operations (currently just acknowledge).
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 369d2a2d7d3e..fa05719f9981 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -74,3 +74,61 @@ Description:
Valid values:
- 0 - 70 (minutes), step by 10 (rounded down)
+
+What: /sys/class/power_supply/bq24257-charger/ovp_voltage
+Date: October 2015
+KernelVersion: 4.4.0
+Contact: Andreas Dannenberg <dannenberg@ti.com>
+Description:
+ This entry configures the overvoltage protection feature of bq24257-
+ type charger devices. This feature protects the device and other
+ components against damage from overvoltage on the input supply. See
+ device datasheet for details.
+
+ Valid values:
+ - 6000000, 6500000, 7000000, 8000000, 9000000, 9500000, 10000000,
+ 10500000 (all uV)
+
+What: /sys/class/power_supply/bq24257-charger/in_dpm_voltage
+Date: October 2015
+KernelVersion: 4.4.0
+Contact: Andreas Dannenberg <dannenberg@ti.com>
+Description:
+ This entry configures the input dynamic power path management voltage of
+ bq24257-type charger devices. Once the supply drops to the configured
+	voltage, the input current limit is reduced to prevent a further
+	drop of the supply. When the IC enters this mode, the charge current is
+ lower than the set value. See device datasheet for details.
+
+ Valid values:
+ - 4200000, 4280000, 4360000, 4440000, 4520000, 4600000, 4680000,
+ 4760000 (all uV)
+
+What: /sys/class/power_supply/bq24257-charger/high_impedance_enable
+Date: October 2015
+KernelVersion: 4.4.0
+Contact: Andreas Dannenberg <dannenberg@ti.com>
+Description:
+ This entry allows enabling the high-impedance mode of bq24257-type
+ charger devices. If enabled, it places the charger IC into low power
+ standby mode with the switch mode controller disabled. When disabled,
+ the charger operates normally. See device datasheet for details.
+
+ Valid values:
+ - 1: enabled
+ - 0: disabled
+
+What: /sys/class/power_supply/bq24257-charger/sysoff_enable
+Date: October 2015
+KernelVersion: 4.4.0
+Contact: Andreas Dannenberg <dannenberg@ti.com>
+Description:
+ This entry allows enabling the sysoff mode of bq24257-type charger
+ devices. If enabled and the input is removed, the internal battery FET
+ is turned off in order to reduce the leakage from the BAT pin to less
+ than 1uA. Note that on some devices/systems this disconnects the battery
+ from the system. See device datasheet for details.
+
+ Valid values:
+ - 1: enabled
+ - 0: disabled
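
The four attributes above take plain integer writes (microvolts for the voltage
settings, 0/1 for the enable flags). A minimal user-space sketch, assuming a
supply named "bq24257-charger" and the 10.5 V OVP setting purely for
illustration (neither value comes from the patch itself):

/*
 * Sketch only: select the bq24257 overvoltage protection threshold and
 * read it back through sysfs. The supply name and the 10500000 uV value
 * are illustrative assumptions; pick one of the documented values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const char *attr =
	"/sys/class/power_supply/bq24257-charger/ovp_voltage";

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open(attr, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "10500000", 8) != 8)	/* 10.5 V, in uV */
		perror("write");
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("ovp_voltage = %s", buf);
	}
	close(fd);
	return 0;
}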
diff --git a/Documentation/ABI/testing/sysfs-driver-ppi b/Documentation/ABI/testing/sysfs-driver-ppi
index 7d1435bc976c..9921ef285899 100644
--- a/Documentation/ABI/testing/sysfs-driver-ppi
+++ b/Documentation/ABI/testing/sysfs-driver-ppi
@@ -1,4 +1,4 @@
-What: /sys/devices/pnp0/<bus-num>/ppi/
+What: /sys/class/tpm/tpmX/ppi/
Date: August 2012
Kernel Version: 3.6
Contact: xiaoyan.zhang@intel.com
@@ -8,9 +8,14 @@ Description:
folder makes sense. The folder path can be got by command
'find /sys/ -name 'pcrs''. For the detail information of PPI,
please refer to the PPI specification from
+
http://www.trustedcomputinggroup.org/
-What: /sys/devices/pnp0/<bus-num>/ppi/version
+ In Linux 4.2 ppi was moved to the character device directory.
+	A symlink from tpmX/device/ppi to tpmX/ppi is provided for backwards
+	compatibility.
+
+What: /sys/class/tpm/tpmX/ppi/version
Date: August 2012
Contact: xiaoyan.zhang@intel.com
Description:
@@ -18,7 +23,7 @@ Description:
platform.
This file is readonly.
-What: /sys/devices/pnp0/<bus-num>/ppi/request
+What: /sys/class/tpm/tpmX/ppi/request
Date: August 2012
Contact: xiaoyan.zhang@intel.com
Description:
@@ -28,7 +33,7 @@ Description:
integer value range from 1 to 160, and 0 means no request.
This file can be read and written.
-What: /sys/devices/pnp0/00:<bus-num>/ppi/response
+What: /sys/class/tpm/tpmX/ppi/response
Date: August 2012
Contact: xiaoyan.zhang@intel.com
Description:
@@ -37,7 +42,7 @@ Description:
: <response description>".
This file is readonly.
-What: /sys/devices/pnp0/<bus-num>/ppi/transition_action
+What: /sys/class/tpm/tpmX/ppi/transition_action
Date: August 2012
Contact: xiaoyan.zhang@intel.com
Description:
@@ -47,7 +52,7 @@ Description:
description>".
This file is readonly.
-What: /sys/devices/pnp0/<bus-num>/ppi/tcg_operations
+What: /sys/class/tpm/tpmX/ppi/tcg_operations
Date: August 2012
Contact: xiaoyan.zhang@intel.com
Description:
@@ -58,7 +63,7 @@ Description:
This attribute is only supported by PPI version 1.2+.
This file is readonly.
-What: /sys/devices/pnp0/<bus-num>/ppi/vs_operations
+What: /sys/class/tpm/tpmX/ppi/vs_operations
Date: August 2012
Contact: xiaoyan.zhang@intel.com
Description:
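
The request and response attributes relocated above form a small transaction:
user space writes an operation number to request and later reads a short status
line from response. A hedged sketch against the new tpmX/ppi/ location; the
tpm0 instance and the operation number 5 are assumptions for the example, not
values taken from this patch:

/*
 * Illustrative only: queue a PPI operation and read the response
 * through the relocated /sys/class/tpm/tpmX/ppi/ files.
 */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/class/tpm/tpm0/ppi/request", "w");

	if (!f) {
		perror("request");
		return 1;
	}
	fprintf(f, "5\n");		/* 0 would mean "no request" */
	fclose(f);

	f = fopen("/sys/class/tpm/tpm0/ppi/response", "r");
	if (f && fgets(line, sizeof(line), f))
		printf("response: %s", line);
	if (f)
		fclose(f);
	return 0;
}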
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 2c4cc42006e8..0345f2d1c727 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -80,3 +80,15 @@ Date: February 2015
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Controls the trimming rate in batch mode.
+
+What: /sys/fs/f2fs/<disk>/cp_interval
+Date: October 2015
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Controls the checkpoint timing.
+
+What: /sys/fs/f2fs/<disk>/ra_nid_pages
+Date: October 2015
+Contact: "Chao Yu" <chao2.yu@samsung.com>
+Description:
+	Controls the number of nid pages to be read ahead.
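
Both new f2fs knobs are ordinary writable sysfs attributes. A purely
illustrative sketch; the "sda1" instance name and the value 60 are assumptions,
and the hunk above does not state the unit of cp_interval:

#include <stdio.h>

int main(void)
{
	/* illustrative path and value; adjust to the mounted f2fs instance */
	FILE *f = fopen("/sys/fs/f2fs/sda1/cp_interval", "w");

	if (!f) {
		perror("cp_interval");
		return 1;
	}
	fprintf(f, "60\n");
	return fclose(f) ? 1 : 0;
}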
diff --git a/Documentation/Changes b/Documentation/Changes
index f447f0516f07..ec97b77c8b00 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -44,6 +44,7 @@ o grub 0.93 # grub --version || grub-insta
o mcelog 0.6 # mcelog --version
o iptables 1.4.2 # iptables -V
o openssl & libcrypto 1.0.0 # openssl version
+o bc 1.06.95 # bc --version
Kernel compilation
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 55b70b903ead..d69b3fc64e14 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -681,6 +681,11 @@ or:
as appropriate.
+PLEASE NOTE: The 'nents' argument to dma_sync_sg_for_cpu() and
+	      dma_sync_sg_for_device() must be the same as the one passed to
+ dma_map_sg(). It is _NOT_ the count returned by
+ dma_map_sg().
+
After the last DMA transfer call one of the DMA unmap routines
dma_unmap_{single,sg}(). If you don't touch the data from the first
dma_map_*() call till dma_unmap_*(), then you don't have to call the
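
In code, the note added above boils down to keeping two separate values: the
nents the scatterlist was mapped with, and the count dma_map_sg() returned. A
sketch under the usual DMA-API assumptions; the helper name is invented for
illustration:

/*
 * Keep the ORIGINAL nents for dma_sync_sg_*() and dma_unmap_sg(); the
 * value returned by dma_map_sg() only says how many entries the mapping
 * was coalesced into.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_map_and_sync(struct device *dev,
				struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (!mapped)
		return -ENOMEM;

	/* ... device writes into the 'mapped' coalesced entries ... */

	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);	/* original nents */
	/* ... CPU reads the data ... */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);		/* original nents */
	return 0;
}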
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index edccacd4f048..8d065d6ec956 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -340,7 +340,7 @@ accessed sg->address and sg->length as shown above.
void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction)
+ int nents, enum dma_data_direction direction)
Unmap the previously mapped scatter/gather list. All the parameters
must be the same as those and passed in to the scatter/gather mapping
@@ -356,10 +356,10 @@ void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
Synchronise a single contiguous or scatter/gather mapping for the CPU
diff --git a/Documentation/DocBook/.gitignore b/Documentation/DocBook/.gitignore
index 7ebd5465d927..e05da3f7aa21 100644
--- a/Documentation/DocBook/.gitignore
+++ b/Documentation/DocBook/.gitignore
@@ -11,5 +11,7 @@
*.png
*.gif
*.svg
+*.proc
+*.db
media-indices.tmpl
media-entities.tmpl
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 93eff64387cd..d2544961b67a 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -69,6 +69,12 @@ installmandocs: mandocs
KERNELDOCXMLREF = $(srctree)/scripts/kernel-doc-xml-ref
KERNELDOC = $(srctree)/scripts/kernel-doc
DOCPROC = $(objtree)/scripts/docproc
+CHECK_LC_CTYPE = $(objtree)/scripts/check-lc_ctype
+
+# Use a fixed encoding - UTF-8 if the C library has support built-in
+# or ASCII if not
+LC_CTYPE := $(call try-run, LC_CTYPE=C.UTF-8 $(CHECK_LC_CTYPE),C.UTF-8,C)
+export LC_CTYPE
XMLTOFLAGS = -m $(srctree)/$(src)/stylesheet.xsl
XMLTOFLAGS += --skip-validation
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 1d6008d51b55..42a2d8593e39 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -221,6 +221,9 @@ X!Isound/sound_firmware.c
<title>Media Devices</title>
<sect1><title>Video2Linux devices</title>
+!Iinclude/media/tuner.h
+!Iinclude/media/tuner-types.h
+!Iinclude/media/tveeprom.h
!Iinclude/media/v4l2-async.h
!Iinclude/media/v4l2-ctrls.h
!Iinclude/media/v4l2-dv-timings.h
@@ -231,6 +234,7 @@ X!Isound/sound_firmware.c
!Iinclude/media/v4l2-of.h
!Iinclude/media/v4l2-subdev.h
!Iinclude/media/videobuf2-core.h
+!Iinclude/media/videobuf2-v4l2.h
!Iinclude/media/videobuf2-memops.h
</sect1>
<sect1><title>Digital TV (DVB) devices</title>
@@ -239,15 +243,82 @@ X!Isound/sound_firmware.c
!Idrivers/media/dvb-core/dvb_math.h
!Idrivers/media/dvb-core/dvb_ringbuffer.h
!Idrivers/media/dvb-core/dvbdev.h
- </sect1>
- <sect1><title>Remote Controller devices</title>
+ <sect1><title>Digital TV Demux API</title>
+	  <para>The kernel demux API defines a driver-internal interface for
+	  registering a low-level, hardware-specific driver with a
+	  hardware-independent demux layer. It is only of interest for Digital
+	  TV device driver writers. The header file for this API is named
+ <constant>demux.h</constant> and located in
+ <constant>drivers/media/dvb-core</constant>.</para>
+
+ <para>The demux API should be implemented for each demux in the
+ system. It is used to select the TS source of a demux and to manage
+ the demux resources. When the demux client allocates a resource via
+ the demux API, it receives a pointer to the API of that
+ resource.</para>
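For illustration only (the authoritative prototypes are in
drivers/media/dvb-core/demux.h and have changed between kernel versions; d,
my_ts_cb and the error handling are placeholders), a client might obtain and
later release a TS feed resource roughly like this:

        struct dmx_ts_feed *feed;
        int ret;

        /* the demux hands back the API of the allocated resource via 'feed' */
        ret = d->allocate_ts_feed(d, &feed, my_ts_cb);
        if (ret)
                return ret;

        /* ... configure the feed via feed->set(), then start filtering ... */
        feed->start_filtering(feed);

        /* when done, stop filtering and release the resource */
        feed->stop_filtering(feed);
        d->release_ts_feed(d, feed);
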
+ <para>Each demux receives its TS input from a DVB front-end or from
+ memory, as set via this demux API. In a system with more than one
+ front-end, the API can be used to select one of the DVB front-ends
+ as a TS source for a demux, unless this is fixed in the HW platform.
+	  The demux API only controls front-ends regarding their connections
+ with demuxes; the APIs used to set the other front-end parameters,
+ such as tuning, are not defined in this document.</para>
+	  <para>The functions that implement the abstract demux interface should
+	  be defined static or module-private and registered with the demux
+ core for external access. It is not necessary to implement every
+ function in the struct <constant>dmx_demux</constant>. For example,
+ a demux interface might support Section filtering, but not PES
+ filtering. The API client is expected to check the value of any
+ function pointer before calling the function: the value of NULL means
+ that the &#8220;function is not available&#8221;.</para>
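A short sketch of that NULL check (d, buf and count are placeholders;
memory-based input via write() is one of the optional operations):

        /* memory-based filtering is optional, so write() may be NULL */
        if (!d->write)
                return -ENOSYS;
        return d->write(d, buf, count);
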
+ <para>Whenever the functions of the demux API modify shared data,
+ the possibilities of lost update and race condition problems should
+ be addressed, e.g. by protecting parts of code with mutexes.</para>
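One possible pattern, shown only as a sketch (struct my_demux and struct
my_feed are hypothetical): serialize updates of shared demux state with a
mutex so that concurrent API calls cannot race.

        #include <linux/mutex.h>
        #include <linux/list.h>

        static DEFINE_MUTEX(my_demux_lock);

        static int my_demux_add_feed(struct my_demux *md, struct my_feed *f)
        {
                mutex_lock(&my_demux_lock);
                list_add_tail(&f->list, &md->feed_list);  /* shared data */
                mutex_unlock(&my_demux_lock);
                return 0;
        }
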
+ <para>Note that functions called from a bottom half context must not
+ sleep. Even a simple memory allocation without using GFP_ATOMIC can
+ result in a kernel thread being put to sleep if swapping is needed.
+ For example, the Linux kernel calls the functions of a network device
+ interface from a bottom half context. Thus, if a demux API function
+ is called from network device code, the function must not sleep.
+ </para>
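Illustrating the non-sleeping rule (my_process_packet() and the deferred queue
are hypothetical): in bottom-half context any allocation must use GFP_ATOMIC,
because GFP_KERNEL may sleep while memory is being reclaimed.

        #include <linux/slab.h>
        #include <linux/string.h>
        #include <linux/types.h>

        /* called from tasklet/bottom-half context: must not sleep */
        static void my_process_packet(const u8 *pkt, size_t len)
        {
                u8 *copy = kmalloc(len, GFP_ATOMIC); /* GFP_KERNEL could sleep */

                if (!copy)
                        return;         /* drop the packet under memory pressure */
                memcpy(copy, pkt, len);
                /* ... hand 'copy' over for deferred processing ... */
                kfree(copy);            /* placeholder: real code would enqueue it */
        }
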
+ </sect1>
+
+ <section id="demux_callback_api">
+ <title>Demux Callback API</title>
+ <para>This kernel-space API comprises the callback functions that
+ deliver filtered data to the demux client. Unlike the other DVB
+ kABIs, these functions are provided by the client and called from
+ the demux code.</para>
+ <para>The function pointers of this abstract interface are not
+ packed into a structure as in the other demux APIs, because the
+	    callback functions are registered and used independently of each
+ other. As an example, it is possible for the API client to provide
+ several callback functions for receiving TS packets and no
+ callbacks for PES packets or sections.</para>
+ <para>The functions that implement the callback API need not be
+ re-entrant: when a demux driver calls one of these functions,
+ the driver is not allowed to call the function again before
+ the original call returns. If a callback is triggered by a
+ hardware interrupt, it is recommended to use the Linux
+ &#8220;bottom half&#8221; mechanism or start a tasklet instead of
+ making the callback function call directly from a hardware
+ interrupt.</para>
+ <para>This mechanism is implemented by
+ <link linkend='API-dmx-ts-cb'>dmx_ts_cb()</link> and
+ <link linkend='API-dmx-section-cb'>dmx_section_cb()</link>.</para>
+ </section>
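A skeletal client callback following the shape documented here (treat it as a
sketch: the exact dmx_ts_cb typedef lives in demux.h and its trailing
arguments have varied across kernel versions; consume_ts() is a hypothetical
consumer). buffer2 is non-NULL only when the filtered data wrapped past the
end of the circular buffer.

        static int my_ts_cb(const u8 *buffer1, size_t buffer1_length,
                            const u8 *buffer2, size_t buffer2_length,
                            struct dmx_ts_feed *source, enum dmx_success success)
        {
                /* may run from a bottom half, so this function must not sleep */
                consume_ts(buffer1, buffer1_length);
                if (buffer2)                    /* wrapped part of the buffer */
                        consume_ts(buffer2, buffer2_length);
                return 0;
        }
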
+
+!Idrivers/media/dvb-core/demux.h
+ </sect1>
+ <sect1><title>Remote Controller devices</title>
!Iinclude/media/rc-core.h
- </sect1>
- <sect1><title>Media Controller devices</title>
+!Iinclude/media/lirc_dev.h
+ </sect1>
+ <sect1><title>Media Controller devices</title>
!Iinclude/media/media-device.h
!Iinclude/media/media-devnode.h
!Iinclude/media/media-entity.h
- </sect1>
+ </sect1>
</chapter>
diff --git a/Documentation/DocBook/media/dvb/dvbapi.xml b/Documentation/DocBook/media/dvb/dvbapi.xml
index 858fd7d17104..8576481e20ae 100644
--- a/Documentation/DocBook/media/dvb/dvbapi.xml
+++ b/Documentation/DocBook/media/dvb/dvbapi.xml
@@ -125,9 +125,6 @@ Added ISDB-T test originally written by Patrick Boettcher
&sub-audio;
</section>
</chapter>
- <chapter id="dvb_kdapi">
- &sub-kdapi;
- </chapter>
<chapter id="dvb_examples">
&sub-examples;
</chapter>
diff --git a/Documentation/DocBook/media/dvb/kdapi.xml b/Documentation/DocBook/media/dvb/kdapi.xml
deleted file mode 100644
index 68bcd33a82c3..000000000000
--- a/Documentation/DocBook/media/dvb/kdapi.xml
+++ /dev/null
@@ -1,2309 +0,0 @@
-<title>Kernel Demux API</title>
-<para>The kernel demux API defines a driver-internal interface for registering low-level,
-hardware specific driver to a hardware independent demux layer. It is only of interest for
-DVB device driver writers. The header file for this API is named <constant>demux.h</constant> and located in
-<constant>">drivers/media/dvb-core</constant>.
-</para>
-<para>Maintainer note: This section must be reviewed. It is probably out of date.
-</para>
-
-<section id="kernel_demux_data_types">
-<title>Kernel Demux Data Types</title>
-
-
-<section id="dmx_success_t">
-<title>dmx_success_t</title>
- <programlisting>
- typedef enum {
- DMX_OK = 0, /&#x22C6; Received Ok &#x22C6;/
- DMX_LENGTH_ERROR, /&#x22C6; Incorrect length &#x22C6;/
- DMX_OVERRUN_ERROR, /&#x22C6; Receiver ring buffer overrun &#x22C6;/
- DMX_CRC_ERROR, /&#x22C6; Incorrect CRC &#x22C6;/
- DMX_FRAME_ERROR, /&#x22C6; Frame alignment error &#x22C6;/
- DMX_FIFO_ERROR, /&#x22C6; Receiver FIFO overrun &#x22C6;/
- DMX_MISSED_ERROR /&#x22C6; Receiver missed packet &#x22C6;/
- } dmx_success_t;
-</programlisting>
-
-</section>
-<section id="ts_filter_types">
-<title>TS filter types</title>
- <programlisting>
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
- /&#x22C6; TS packet reception &#x22C6;/
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
-
- /&#x22C6; TS filter type for set_type() &#x22C6;/
-
- #define TS_PACKET 1 /&#x22C6; send TS packets (188 bytes) to callback (default) &#x22C6;/
- #define TS_PAYLOAD_ONLY 2 /&#x22C6; in case TS_PACKET is set, only send the TS
- payload (&#x003C;=184 bytes per packet) to callback &#x22C6;/
- #define TS_DECODER 4 /&#x22C6; send stream to built-in decoder (if present) &#x22C6;/
-</programlisting>
-
-</section>
-<section id="dmx_ts_pes_t">
-<title>dmx_ts_pes_t</title>
-<para>The structure
-</para>
-<programlisting>
- typedef enum
- {
- DMX_TS_PES_AUDIO, /&#x22C6; also send packets to audio decoder (if it exists) &#x22C6;/
- DMX_TS_PES_VIDEO, /&#x22C6; ... &#x22C6;/
- DMX_TS_PES_TELETEXT,
- DMX_TS_PES_SUBTITLE,
- DMX_TS_PES_PCR,
- DMX_TS_PES_OTHER,
- } dmx_ts_pes_t;
-</programlisting>
-<para>describes the PES type for filters which write to a built-in decoder. The correspond (and
-should be kept identical) to the types in the demux device.
-</para>
-<programlisting>
- struct dmx_ts_feed_s {
- int is_filtering; /&#x22C6; Set to non-zero when filtering in progress &#x22C6;/
- struct dmx_demux_s&#x22C6; parent; /&#x22C6; Back-pointer &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- int (&#x22C6;set) (struct dmx_ts_feed_s&#x22C6; feed,
- __u16 pid,
- size_t callback_length,
- size_t circular_buffer_size,
- int descramble,
- struct timespec timeout);
- int (&#x22C6;start_filtering) (struct dmx_ts_feed_s&#x22C6; feed);
- int (&#x22C6;stop_filtering) (struct dmx_ts_feed_s&#x22C6; feed);
- int (&#x22C6;set_type) (struct dmx_ts_feed_s&#x22C6; feed,
- int type,
- dmx_ts_pes_t pes_type);
- };
-
- typedef struct dmx_ts_feed_s dmx_ts_feed_t;
-</programlisting>
- <programlisting>
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
- /&#x22C6; PES packet reception (not supported yet) &#x22C6;/
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
-
- typedef struct dmx_pes_filter_s {
- struct dmx_pes_s&#x22C6; parent; /&#x22C6; Back-pointer &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- } dmx_pes_filter_t;
-</programlisting>
- <programlisting>
- typedef struct dmx_pes_feed_s {
- int is_filtering; /&#x22C6; Set to non-zero when filtering in progress &#x22C6;/
- struct dmx_demux_s&#x22C6; parent; /&#x22C6; Back-pointer &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- int (&#x22C6;set) (struct dmx_pes_feed_s&#x22C6; feed,
- __u16 pid,
- size_t circular_buffer_size,
- int descramble,
- struct timespec timeout);
- int (&#x22C6;start_filtering) (struct dmx_pes_feed_s&#x22C6; feed);
- int (&#x22C6;stop_filtering) (struct dmx_pes_feed_s&#x22C6; feed);
- int (&#x22C6;allocate_filter) (struct dmx_pes_feed_s&#x22C6; feed,
- dmx_pes_filter_t&#x22C6;&#x22C6; filter);
- int (&#x22C6;release_filter) (struct dmx_pes_feed_s&#x22C6; feed,
- dmx_pes_filter_t&#x22C6; filter);
- } dmx_pes_feed_t;
-</programlisting>
- <programlisting>
- typedef struct {
- __u8 filter_value [DMX_MAX_FILTER_SIZE];
- __u8 filter_mask [DMX_MAX_FILTER_SIZE];
- struct dmx_section_feed_s&#x22C6; parent; /&#x22C6; Back-pointer &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- } dmx_section_filter_t;
-</programlisting>
- <programlisting>
- struct dmx_section_feed_s {
- int is_filtering; /&#x22C6; Set to non-zero when filtering in progress &#x22C6;/
- struct dmx_demux_s&#x22C6; parent; /&#x22C6; Back-pointer &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- int (&#x22C6;set) (struct dmx_section_feed_s&#x22C6; feed,
- __u16 pid,
- size_t circular_buffer_size,
- int descramble,
- int check_crc);
- int (&#x22C6;allocate_filter) (struct dmx_section_feed_s&#x22C6; feed,
- dmx_section_filter_t&#x22C6;&#x22C6; filter);
- int (&#x22C6;release_filter) (struct dmx_section_feed_s&#x22C6; feed,
- dmx_section_filter_t&#x22C6; filter);
- int (&#x22C6;start_filtering) (struct dmx_section_feed_s&#x22C6; feed);
- int (&#x22C6;stop_filtering) (struct dmx_section_feed_s&#x22C6; feed);
- };
- typedef struct dmx_section_feed_s dmx_section_feed_t;
-
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
- /&#x22C6; Callback functions &#x22C6;/
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
-
- typedef int (&#x22C6;dmx_ts_cb) ( __u8 &#x22C6; buffer1,
- size_t buffer1_length,
- __u8 &#x22C6; buffer2,
- size_t buffer2_length,
- dmx_ts_feed_t&#x22C6; source,
- dmx_success_t success);
-
- typedef int (&#x22C6;dmx_section_cb) ( __u8 &#x22C6; buffer1,
- size_t buffer1_len,
- __u8 &#x22C6; buffer2,
- size_t buffer2_len,
- dmx_section_filter_t &#x22C6; source,
- dmx_success_t success);
-
- typedef int (&#x22C6;dmx_pes_cb) ( __u8 &#x22C6; buffer1,
- size_t buffer1_len,
- __u8 &#x22C6; buffer2,
- size_t buffer2_len,
- dmx_pes_filter_t&#x22C6; source,
- dmx_success_t success);
-
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
- /&#x22C6; DVB Front-End &#x22C6;/
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
-
- typedef enum {
- DMX_OTHER_FE = 0,
- DMX_SATELLITE_FE,
- DMX_CABLE_FE,
- DMX_TERRESTRIAL_FE,
- DMX_LVDS_FE,
- DMX_ASI_FE, /&#x22C6; DVB-ASI interface &#x22C6;/
- DMX_MEMORY_FE
- } dmx_frontend_source_t;
-
- typedef struct {
- /&#x22C6; The following char&#x22C6; fields point to NULL terminated strings &#x22C6;/
- char&#x22C6; id; /&#x22C6; Unique front-end identifier &#x22C6;/
- char&#x22C6; vendor; /&#x22C6; Name of the front-end vendor &#x22C6;/
- char&#x22C6; model; /&#x22C6; Name of the front-end model &#x22C6;/
- struct list_head connectivity_list; /&#x22C6; List of front-ends that can
- be connected to a particular
- demux &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- dmx_frontend_source_t source;
- } dmx_frontend_t;
-
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
- /&#x22C6; MPEG-2 TS Demux &#x22C6;/
- /&#x22C6;--------------------------------------------------------------------------&#x22C6;/
-
- /&#x22C6;
- &#x22C6; Flags OR'ed in the capabilites field of struct dmx_demux_s.
- &#x22C6;/
-
- #define DMX_TS_FILTERING 1
- #define DMX_PES_FILTERING 2
- #define DMX_SECTION_FILTERING 4
- #define DMX_MEMORY_BASED_FILTERING 8 /&#x22C6; write() available &#x22C6;/
- #define DMX_CRC_CHECKING 16
- #define DMX_TS_DESCRAMBLING 32
- #define DMX_SECTION_PAYLOAD_DESCRAMBLING 64
- #define DMX_MAC_ADDRESS_DESCRAMBLING 128
-</programlisting>
-
-</section>
-<section id="demux_demux_t">
-<title>demux_demux_t</title>
- <programlisting>
- /&#x22C6;
- &#x22C6; DMX_FE_ENTRY(): Casts elements in the list of registered
- &#x22C6; front-ends from the generic type struct list_head
- &#x22C6; to the type &#x22C6; dmx_frontend_t
- &#x22C6;.
- &#x22C6;/
-
- #define DMX_FE_ENTRY(list) list_entry(list, dmx_frontend_t, connectivity_list)
-
- struct dmx_demux_s {
- /&#x22C6; The following char&#x22C6; fields point to NULL terminated strings &#x22C6;/
- char&#x22C6; id; /&#x22C6; Unique demux identifier &#x22C6;/
- char&#x22C6; vendor; /&#x22C6; Name of the demux vendor &#x22C6;/
- char&#x22C6; model; /&#x22C6; Name of the demux model &#x22C6;/
- __u32 capabilities; /&#x22C6; Bitfield of capability flags &#x22C6;/
- dmx_frontend_t&#x22C6; frontend; /&#x22C6; Front-end connected to the demux &#x22C6;/
- struct list_head reg_list; /&#x22C6; List of registered demuxes &#x22C6;/
- void&#x22C6; priv; /&#x22C6; Pointer to private data of the API client &#x22C6;/
- int users; /&#x22C6; Number of users &#x22C6;/
- int (&#x22C6;open) (struct dmx_demux_s&#x22C6; demux);
- int (&#x22C6;close) (struct dmx_demux_s&#x22C6; demux);
- int (&#x22C6;write) (struct dmx_demux_s&#x22C6; demux, const char&#x22C6; buf, size_t count);
- int (&#x22C6;allocate_ts_feed) (struct dmx_demux_s&#x22C6; demux,
- dmx_ts_feed_t&#x22C6;&#x22C6; feed,
- dmx_ts_cb callback);
- int (&#x22C6;release_ts_feed) (struct dmx_demux_s&#x22C6; demux,
- dmx_ts_feed_t&#x22C6; feed);
- int (&#x22C6;allocate_pes_feed) (struct dmx_demux_s&#x22C6; demux,
- dmx_pes_feed_t&#x22C6;&#x22C6; feed,
- dmx_pes_cb callback);
- int (&#x22C6;release_pes_feed) (struct dmx_demux_s&#x22C6; demux,
- dmx_pes_feed_t&#x22C6; feed);
- int (&#x22C6;allocate_section_feed) (struct dmx_demux_s&#x22C6; demux,
- dmx_section_feed_t&#x22C6;&#x22C6; feed,
- dmx_section_cb callback);
- int (&#x22C6;release_section_feed) (struct dmx_demux_s&#x22C6; demux,
- dmx_section_feed_t&#x22C6; feed);
- int (&#x22C6;descramble_mac_address) (struct dmx_demux_s&#x22C6; demux,
- __u8&#x22C6; buffer1,
- size_t buffer1_length,
- __u8&#x22C6; buffer2,
- size_t buffer2_length,
- __u16 pid);
- int (&#x22C6;descramble_section_payload) (struct dmx_demux_s&#x22C6; demux,
- __u8&#x22C6; buffer1,
- size_t buffer1_length,
- __u8&#x22C6; buffer2, size_t buffer2_length,
- __u16 pid);
- int (&#x22C6;add_frontend) (struct dmx_demux_s&#x22C6; demux,
- dmx_frontend_t&#x22C6; frontend);
- int (&#x22C6;remove_frontend) (struct dmx_demux_s&#x22C6; demux,
- dmx_frontend_t&#x22C6; frontend);
- struct list_head&#x22C6; (&#x22C6;get_frontends) (struct dmx_demux_s&#x22C6; demux);
- int (&#x22C6;connect_frontend) (struct dmx_demux_s&#x22C6; demux,
- dmx_frontend_t&#x22C6; frontend);
- int (&#x22C6;disconnect_frontend) (struct dmx_demux_s&#x22C6; demux);
-
-
- /&#x22C6; added because js cannot keep track of these himself &#x22C6;/
- int (&#x22C6;get_pes_pids) (struct dmx_demux_s&#x22C6; demux, __u16 &#x22C6;pids);
- };
- typedef struct dmx_demux_s dmx_demux_t;
-</programlisting>
-
-</section>
-<section id="demux_directory">
-<title>Demux directory</title>
- <programlisting>
- /&#x22C6;
- &#x22C6; DMX_DIR_ENTRY(): Casts elements in the list of registered
- &#x22C6; demuxes from the generic type struct list_head&#x22C6; to the type dmx_demux_t
- &#x22C6;.
- &#x22C6;/
-
- #define DMX_DIR_ENTRY(list) list_entry(list, dmx_demux_t, reg_list)
-
- int dmx_register_demux (dmx_demux_t&#x22C6; demux);
- int dmx_unregister_demux (dmx_demux_t&#x22C6; demux);
- struct list_head&#x22C6; dmx_get_demuxes (void);
-</programlisting>
- </section></section>
-<section id="demux_directory_api">
-<title>Demux Directory API</title>
-<para>The demux directory is a Linux kernel-wide facility for registering and accessing the
-MPEG-2 TS demuxes in the system. Run-time registering and unregistering of demux drivers
-is possible using this API.
-</para>
-<para>All demux drivers in the directory implement the abstract interface dmx_demux_t.
-</para>
-
-<section
-role="subsection"><title>dmx_register_demux()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function makes a demux driver interface available to the Linux kernel. It is
- usually called by the init_module() function of the kernel module that contains
- the demux driver. The caller of this function is responsible for allocating
- dynamic or static memory for the demux structure and for initializing its fields
- before calling this function. The memory allocated for the demux structure
- must not be freed before calling dmx_unregister_demux(),</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int dmx_register_demux ( dmx_demux_t &#x22C6;demux )</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux structure.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EEXIST</para>
-</entry><entry
- align="char">
-<para>A demux with the same value of the id field already stored
- in the directory.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSPC</para>
-</entry><entry
- align="char">
-<para>No space left in the directory.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>dmx_unregister_demux()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function is called to indicate that the given demux interface is no
- longer available. The caller of this function is responsible for freeing the
- memory of the demux structure, if it was dynamically allocated before calling
- dmx_register_demux(). The cleanup_module() function of the kernel module
- that contains the demux driver should call this function. Note that this function
- fails if the demux is currently in use, i.e., release_demux() has not been called
- for the interface.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int dmx_unregister_demux ( dmx_demux_t &#x22C6;demux )</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux structure which is to be
- unregistered.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ENODEV</para>
-</entry><entry
- align="char">
-<para>The specified demux is not registered in the demux
- directory.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>The specified demux is currently in use.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>dmx_get_demuxes()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Provides the caller with the list of registered demux interfaces, using the
- standard list structure defined in the include file linux/list.h. The include file
- demux.h defines the macro DMX_DIR_ENTRY() for converting an element of
- the generic type struct list_head* to the type dmx_demux_t*. The caller must
- not free the memory of any of the elements obtained via this function call.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>struct list_head &#x22C6;dmx_get_demuxes ()</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>none</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>struct list_head *</para>
-</entry><entry
- align="char">
-<para>A list of demux interfaces, or NULL in the case of an
- empty list.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
- </section></section>
-<section id="demux_api">
-<title>Demux API</title>
-<para>The demux API should be implemented for each demux in the system. It is used to select
-the TS source of a demux and to manage the demux resources. When the demux
-client allocates a resource via the demux API, it receives a pointer to the API of that
-resource.
-</para>
-<para>Each demux receives its TS input from a DVB front-end or from memory, as set via the
-demux API. In a system with more than one front-end, the API can be used to select one of
-the DVB front-ends as a TS source for a demux, unless this is fixed in the HW platform. The
-demux API only controls front-ends regarding their connections with demuxes; the APIs
-used to set the other front-end parameters, such as tuning, are not defined in this
-document.
-</para>
-<para>The functions that implement the abstract interface demux should be defined static or
-module private and registered to the Demux Directory for external access. It is not necessary
-to implement every function in the demux_t struct, however (for example, a demux interface
-might support Section filtering, but not TS or PES filtering). The API client is expected to
-check the value of any function pointer before calling the function: the value of NULL means
-&#8220;function not available&#8221;.
-</para>
-<para>Whenever the functions of the demux API modify shared data, the possibilities of lost
-update and race condition problems should be addressed, e.g. by protecting parts of code with
-mutexes. This is especially important on multi-processor hosts.
-</para>
-<para>Note that functions called from a bottom half context must not sleep, at least in the 2.2.x
-kernels. Even a simple memory allocation can result in a kernel thread being put to sleep if
-swapping is needed. For example, the Linux kernel calls the functions of a network device
-interface from a bottom half context. Thus, if a demux API function is called from network
-device code, the function must not sleep.
-</para>
-
-
-<section id="kdapi_fopen">
-<title>open()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function reserves the demux for use by the caller and, if necessary,
- initializes the demux. When the demux is no longer needed, the function close()
- should be called. It should be possible for multiple clients to access the demux
- at the same time. Thus, the function implementation should increment the
- demux usage count when open() is called and decrement it when close() is
- called.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int open ( demux_t&#x22C6; demux );</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t* demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EUSERS</para>
-</entry><entry
- align="char">
-<para>Maximum usage count reached.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
-<section id="kdapi_fclose">
-<title>close()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function reserves the demux for use by the caller and, if necessary,
- initializes the demux. When the demux is no longer needed, the function close()
- should be called. It should be possible for multiple clients to access the demux
- at the same time. Thus, the function implementation should increment the
- demux usage count when open() is called and decrement it when close() is
- called.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int close(demux_t&#x22C6; demux);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t* demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENODEV</para>
-</entry><entry
- align="char">
-<para>The demux was not in use.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
-<section id="kdapi_fwrite">
-<title>write()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function provides the demux driver with a memory buffer containing TS
- packets. Instead of receiving TS packets from the DVB front-end, the demux
- driver software will read packets from memory. Any clients of this demux
- with active TS, PES or Section filters will receive filtered data via the Demux
- callback API (see 0). The function returns when all the data in the buffer has
- been consumed by the demux. Demux hardware typically cannot read TS from
- memory. If this is the case, memory-based filtering has to be implemented
- entirely in software.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int write(demux_t&#x22C6; demux, const char&#x22C6; buf, size_t
- count);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t* demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>const char* buf</para>
-</entry><entry
- align="char">
-<para>Pointer to the TS data in kernel-space memory.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t length</para>
-</entry><entry
- align="char">
-<para>Length of the TS data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>The command is not implemented.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>allocate_ts_feed()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Allocates a new TS feed, which is used to filter the TS packets carrying a
- certain PID. The TS feed normally corresponds to a hardware PID filter on the
- demux chip.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int allocate_ts_feed(dmx_demux_t&#x22C6; demux,
- dmx_ts_feed_t&#x22C6;&#x22C6; feed, dmx_ts_cb callback);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t* demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_ts_feed_t**
- feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the TS feed API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_ts_cb callback</para>
-</entry><entry
- align="char">
-<para>Pointer to the callback function for passing received TS
- packet</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EBUSY</para>
-</entry><entry
- align="char">
-<para>No more TS feeds available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>The command is not implemented.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>release_ts_feed()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Releases the resources allocated with allocate_ts_feed(). Any filtering in
- progress on the TS feed should be stopped before calling this function.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int release_ts_feed(dmx_demux_t&#x22C6; demux,
- dmx_ts_feed_t&#x22C6; feed);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t* demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_ts_feed_t* feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the TS feed API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>allocate_section_feed()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Allocates a new section feed, i.e. a demux resource for filtering and receiving
- sections. On platforms with hardware support for section filtering, a section
- feed is directly mapped to the demux HW. On other platforms, TS packets are
- first PID filtered in hardware and a hardware section filter then emulated in
- software. The caller obtains an API pointer of type dmx_section_feed_t as an
- out parameter. Using this API the caller can set filtering parameters and start
- receiving sections.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int allocate_section_feed(dmx_demux_t&#x22C6; demux,
- dmx_section_feed_t &#x22C6;&#x22C6;feed, dmx_section_cb callback);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t *demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_section_feed_t
- **feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_section_cb
- callback</para>
-</entry><entry
- align="char">
-<para>Pointer to the callback function for passing received
- sections.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EBUSY</para>
-</entry><entry
- align="char">
-<para>No more section feeds available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>The command is not implemented.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>release_section_feed()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Releases the resources allocated with allocate_section_feed(), including
- allocated filters. Any filtering in progress on the section feed should be stopped
- before calling this function.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int release_section_feed(dmx_demux_t&#x22C6; demux,
- dmx_section_feed_t &#x22C6;feed);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>demux_t *demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_section_feed_t
- *feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>descramble_mac_address()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function runs a descrambling algorithm on the destination MAC
- address field of a DVB Datagram Section, replacing the original address
- with its un-encrypted version. Otherwise, the description on the function
- descramble_section_payload() applies also to this function.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int descramble_mac_address(dmx_demux_t&#x22C6; demux, __u8
- &#x22C6;buffer1, size_t buffer1_length, __u8 &#x22C6;buffer2,
- size_t buffer2_length, __u16 pid);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t
- *demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u8 *buffer1</para>
-</entry><entry
- align="char">
-<para>Pointer to the first byte of the section.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer1_length</para>
-</entry><entry
- align="char">
-<para>Length of the section data, including headers and CRC,
- in buffer1.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u8* buffer2</para>
-</entry><entry
- align="char">
-<para>Pointer to the tail of the section data, or NULL. The
- pointer has a non-NULL value if the section wraps past
- the end of a circular buffer.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer2_length</para>
-</entry><entry
- align="char">
-<para>Length of the section data, including headers and CRC,
- in buffer2.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16 pid</para>
-</entry><entry
- align="char">
-<para>The PID on which the section was received. Useful
- for obtaining the descrambling key, e.g. from a DVB
- Common Access facility.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>No descrambling facility available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>descramble_section_payload()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function runs a descrambling algorithm on the payload of a DVB
- Datagram Section, replacing the original payload with its un-encrypted
- version. The function will be called from the demux API implementation;
- the API client need not call this function directly. Section-level scrambling
- algorithms are currently standardized only for DVB-RCC (return channel
- over 2-directional cable TV network) systems. For all other DVB networks,
- encryption schemes are likely to be proprietary to each data broadcaster. Thus,
- it is expected that this function pointer will have the value of NULL (i.e.,
- function not available) in most demux API implementations. Nevertheless, it
- should be possible to use the function pointer as a hook for dynamically adding
- a &#8220;plug-in&#8221; descrambling facility to a demux driver.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>While this function is not needed with hardware-based section descrambling,
- the descramble_section_payload function pointer can be used to override the
- default hardware-based descrambling algorithm: if the function pointer has a
- non-NULL value, the corresponding function should be used instead of any
- descrambling hardware.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int descramble_section_payload(dmx_demux_t&#x22C6; demux,
- __u8 &#x22C6;buffer1, size_t buffer1_length, __u8 &#x22C6;buffer2,
- size_t buffer2_length, __u16 pid);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t
- *demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u8 *buffer1</para>
-</entry><entry
- align="char">
-<para>Pointer to the first byte of the section.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer1_length</para>
-</entry><entry
- align="char">
-<para>Length of the section data, including headers and CRC,
- in buffer1.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u8 *buffer2</para>
-</entry><entry
- align="char">
-<para>Pointer to the tail of the section data, or NULL. The
- pointer has a non-NULL value if the section wraps past
- the end of a circular buffer.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer2_length</para>
-</entry><entry
- align="char">
-<para>Length of the section data, including headers and CRC,
- in buffer2.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16 pid</para>
-</entry><entry
- align="char">
-<para>The PID on which the section was received. Useful
- for obtaining the descrambling key, e.g. from a DVB
- Common Access facility.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>No descrambling facility available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>add_frontend()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Registers a connectivity between a demux and a front-end, i.e., indicates that
- the demux can be connected via a call to connect_frontend() to use the given
- front-end as a TS source. The client of this function has to allocate dynamic or
- static memory for the frontend structure and initialize its fields before calling
- this function. This function is normally called during the driver initialization.
- The caller must not free the memory of the frontend struct before successfully
- calling remove_frontend().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int add_frontend(dmx_demux_t &#x22C6;demux, dmx_frontend_t
- &#x22C6;frontend);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_frontend_t*
- frontend</para>
-</entry><entry
- align="char">
-<para>Pointer to the front-end instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EEXIST</para>
-</entry><entry
- align="char">
-<para>A front-end with the same value of the id field already
- registered.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINUSE</para>
-</entry><entry
- align="char">
-<para>The demux is in use.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOMEM</para>
-</entry><entry
- align="char">
-<para>No more front-ends can be added.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>remove_frontend()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Indicates that the given front-end, registered by a call to add_frontend(), can
- no longer be connected as a TS source by this demux. The function should be
- called when a front-end driver or a demux driver is removed from the system.
- If the front-end is in use, the function fails with the return value of -EBUSY.
- After successfully calling this function, the caller can free the memory of
- the frontend struct if it was dynamically allocated before the add_frontend()
- operation.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int remove_frontend(dmx_demux_t&#x22C6; demux,
- dmx_frontend_t&#x22C6; frontend);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_frontend_t*
- frontend</para>
-</entry><entry
- align="char">
-<para>Pointer to the front-end instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EBUSY</para>
-</entry><entry
- align="char">
-<para>The front-end is in use, i.e. a call to connect_frontend()
- has not been followed by a call to disconnect_frontend().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>get_frontends()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Provides the APIs of the front-ends that have been registered for this demux.
- Any of the front-ends obtained with this call can be used as a parameter for
- connect_frontend().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The include file demux.h contains the macro DMX_FE_ENTRY() for
- converting an element of the generic type struct list_head* to the type
- dmx_frontend_t*. The caller must not free the memory of any of the elements
- obtained via this function call.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>struct list_head&#x22C6; get_frontends(dmx_demux_t&#x22C6; demux);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*</para>
-</entry><entry
- align="char">
-<para>A list of front-end interfaces, or NULL in the case of an
- empty list.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>connect_frontend()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Connects the TS output of the front-end to the input of the demux. A demux
- can only be connected to a front-end registered to the demux with the function
- add_frontend().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>It may or may not be possible to connect multiple demuxes to the same
- front-end, depending on the capabilities of the HW platform. When not used,
- the front-end should be released by calling disconnect_frontend().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int connect_frontend(dmx_demux_t&#x22C6; demux,
- dmx_frontend_t&#x22C6; frontend);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_frontend_t*
- frontend</para>
-</entry><entry
- align="char">
-<para>Pointer to the front-end instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EBUSY</para>
-</entry><entry
- align="char">
-<para>The front-end is in use.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>disconnect_frontend()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Disconnects the demux and a front-end previously connected by a
- connect_frontend() call.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int disconnect_frontend(dmx_demux_t&#x22C6; demux);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_demux_t*
- demux</para>
-</entry><entry
- align="char">
-<para>Pointer to the demux API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
- </section></section>
-<section id="demux_callback_api">
-<title>Demux Callback API</title>
-<para>This kernel-space API comprises the callback functions that deliver filtered data to the
-demux client. Unlike the other APIs, these API functions are provided by the client and called
-from the demux code.
-</para>
-<para>The function pointers of this abstract interface are not packed into a structure as in the
-other demux APIs, because the callback functions are registered and used independent
-of each other. As an example, it is possible for the API client to provide several
-callback functions for receiving TS packets and no callbacks for PES packets or
-sections.
-</para>
-<para>The functions that implement the callback API need not be re-entrant: when a demux
-driver calls one of these functions, the driver is not allowed to call the function again before
-the original call returns. If a callback is triggered by a hardware interrupt, it is recommended
-to use the Linux &#8220;bottom half&#8221; mechanism or start a tasklet instead of making the callback
-function call directly from a hardware interrupt.
-</para>
-
-<section
-role="subsection"><title>dmx_ts_cb()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function, provided by the client of the demux API, is called from the
- demux code. The function is only called when filtering on this TS feed has
- been enabled using the start_filtering() function.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>Any TS packets that match the filter settings are copied to a circular buffer. The
- filtered TS packets are delivered to the client using this callback function. The
- size of the circular buffer is controlled by the circular_buffer_size parameter
- of the set() function in the TS Feed API. It is expected that the buffer1 and
- buffer2 callback parameters point to addresses within the circular buffer, but
- other implementations are also possible. Note that the called party should not
- try to free the memory the buffer1 and buffer2 parameters point to.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>When this function is called, the buffer1 parameter typically points to the
- start of the first undelivered TS packet within a circular buffer. The buffer2
- buffer parameter is normally NULL, except when the received TS packets have
- crossed the last address of the circular buffer and &#8221;wrapped&#8221; to the beginning
- of the buffer. In the latter case the buffer1 parameter would contain an address
- within the circular buffer, while the buffer2 parameter would contain the first
- address of the circular buffer.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The number of bytes delivered with this function (i.e. buffer1_length +
- buffer2_length) is usually equal to the value of callback_length parameter
- given in the set() function, with one exception: if a timeout occurs before
- receiving callback_length bytes of TS data, any undelivered packets are
- immediately delivered to the client by calling this function. The timeout
- duration is controlled by the set() function in the TS Feed API.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>If a TS packet is received with errors that could not be fixed by the TS-level
- forward error correction (FEC), the Transport_error_indicator flag of the TS
- packet header should be set. The TS packet should not be discarded, as
- the error can possibly be corrected by a higher layer protocol. If the called
- party is slow in processing the callback, it is possible that the circular buffer
- eventually fills up. If this happens, the demux driver should discard any TS
- packets received while the buffer is full. The error should be indicated to the
- client on the next callback by setting the success parameter to the value of
- DMX_OVERRUN_ERROR.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The type of data returned to the callback can be selected by the new
- function int (*set_type) (struct dmx_ts_feed_s* feed, int type, dmx_ts_pes_t
- pes_type) which is part of the dmx_ts_feed_s struct (also cf. to the
- include file ost/demux.h) The type parameter decides if the raw TS packet
- (TS_PACKET) or just the payload (TS_PACKET&#8212;TS_PAYLOAD_ONLY)
- should be returned. If additionally the TS_DECODER bit is set the stream
- will also be sent to the hardware MPEG decoder. In this case, the second
- flag decides as what kind of data the stream should be interpreted. The
- possible choices are one of DMX_TS_PES_AUDIO, DMX_TS_PES_VIDEO,
- DMX_TS_PES_TELETEXT, DMX_TS_PES_SUBTITLE,
- DMX_TS_PES_PCR, or DMX_TS_PES_OTHER.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int dmx_ts_cb(__u8&#x22C6; buffer1, size_t buffer1_length,
- __u8&#x22C6; buffer2, size_t buffer2_length, dmx_ts_feed_t&#x22C6;
- source, dmx_success_t success);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>__u8* buffer1</para>
-</entry><entry
- align="char">
-<para>Pointer to the start of the filtered TS packets.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer1_length</para>
-</entry><entry
- align="char">
-<para>Length of the TS data in buffer1.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u8* buffer2</para>
-</entry><entry
- align="char">
-<para>Pointer to the tail of the filtered TS packets, or NULL.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer2_length</para>
-</entry><entry
- align="char">
-<para>Length of the TS data in buffer2.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_ts_feed_t*
- source</para>
-</entry><entry
- align="char">
-<para>Indicates which TS feed is the source of the callback.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_success_t
- success</para>
-</entry><entry
- align="char">
-<para>Indicates if there was an error in TS reception.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>Continue filtering.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-1</para>
-</entry><entry
- align="char">
-<para>Stop filtering - has the same effect as a call to
- stop_filtering() on the TS Feed API.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
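As a rough illustration of the description above, a client-supplied TS
callback could look like the sketch below; only the prototype follows the
synopsis, everything named client_* is invented, and the demux header
providing dmx_ts_feed_t, dmx_success_t and DMX_OVERRUN_ERROR is assumed to
be included:

    /* Hand the filtered packets to the rest of the client, e.g. queue
     * them for a software section parser or a userspace reader. */
    static void client_consume_ts(const __u8 *data, size_t len)
    {
    }

    static int client_ts_cb(__u8 *buffer1, size_t buffer1_length,
                            __u8 *buffer2, size_t buffer2_length,
                            dmx_ts_feed_t *source, dmx_success_t success)
    {
        if (success == DMX_OVERRUN_ERROR) {
            /* the driver dropped packets while the buffer was full */
        }

        client_consume_ts(buffer1, buffer1_length);   /* head of the data */
        if (buffer2)                                  /* wrap-around tail */
            client_consume_ts(buffer2, buffer2_length);

        return 0;   /* 0 = keep filtering, -1 = stop this feed */
    }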
-</section><section
-role="subsection"><title>dmx_section_cb()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function, provided by the client of the demux API, is called from the
- demux code. The function is only called when filtering of sections has been
- enabled using the function start_filtering() of the section feed API. When the
- demux driver has received a complete section that matches at least one section
- filter, the client is notified via this callback function. Normally this function is
- called for each received section; however, it is also possible to deliver multiple
- sections with one callback, for example when the system load is high. If an
- error occurs while receiving a section, this function should be called with
- the corresponding error type set in the success field, whether or not there is
- data to deliver. The Section Feed implementation should maintain a circular
- buffer for received sections. However, this is not necessary if the Section Feed
- API is implemented as a client of the TS Feed API, because the TS Feed
- implementation then buffers the received data. The size of the circular buffer
- can be configured using the set() function in the Section Feed API. If there
- is no room in the circular buffer when a new section is received, the section
- must be discarded. If this happens, the value of the success parameter should
- be DMX_OVERRUN_ERROR on the next callback.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int dmx_section_cb(__u8&#x22C6; buffer1, size_t
- buffer1_length, __u8&#x22C6; buffer2, size_t
- buffer2_length, dmx_section_filter_t&#x22C6; source,
- dmx_success_t success);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>__u8* buffer1</para>
-</entry><entry
- align="char">
-<para>Pointer to the start of the filtered section, e.g. within the
- circular buffer of the demux driver.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer1_length</para>
-</entry><entry
- align="char">
-<para>Length of the filtered section data in buffer1, including
- headers and CRC.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u8* buffer2</para>
-</entry><entry
- align="char">
-<para>Pointer to the tail of the filtered section data, or NULL.
- Useful to handle the wrapping of a circular buffer.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t buffer2_length</para>
-</entry><entry
- align="char">
-<para>Length of the filtered section data in buffer2, including
- headers and CRC.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_section_filter_t*
- filter</para>
-</entry><entry
- align="char">
-<para>Indicates the filter that triggered the callback.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_success_t
- success</para>
-</entry><entry
- align="char">
-<para>Indicates if there was an error in section reception.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>Continue filtering.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-1</para>
-</entry><entry
- align="char">
-<para>Stop filtering - has the same effect as a call to
- stop_filtering() on the Section Feed API.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
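A comparable sketch for a client-supplied section callback; the header
parsing assumes the 3-byte section header is not split across the
circular-buffer wrap (buffer1_length >= 3), and the client-side handling is
left as a comment:

    static int client_section_cb(__u8 *buffer1, size_t buffer1_length,
                                 __u8 *buffer2, size_t buffer2_length,
                                 dmx_section_filter_t *filter,
                                 dmx_success_t success)
    {
        unsigned int table_id, section_length;

        if (success == DMX_OVERRUN_ERROR) {
            /* one or more sections were lost; resynchronize here */
        }

        table_id = buffer1[0];
        section_length = ((buffer1[1] & 0x0f) << 8) | buffer1[2];

        /* buffer1_length + buffer2_length covers the whole section,
         * headers and CRC included; parse it as needed. */

        return 0;   /* 0 = keep filtering, -1 = stop this feed */
    }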
- </section></section>
-<section id="ts_feed_api">
-<title>TS Feed API</title>
-<para>A TS feed is typically mapped to a hardware PID filter on the demux chip.
-Using this API, the client can set the filtering properties to start/stop filtering TS
-packets on a particular TS feed. The API is defined as an abstract interface of the type
-dmx_ts_feed_t.
-</para>
-<para>The functions that implement the interface should be defined static or module private. The
-client can get the handle of a TS feed API by calling the function allocate_ts_feed() in the
-demux API.
-</para>
-
-<section
-role="subsection"><title>set()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function sets the parameters of a TS feed. Any filtering in progress on the
- TS feed must be stopped before calling this function.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int set ( dmx_ts_feed_t&#x22C6; feed, __u16 pid, size_t
- callback_length, size_t circular_buffer_size, int
- descramble, struct timespec timeout);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_ts_feed_t* feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the TS feed API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16 pid</para>
-</entry><entry
- align="char">
-<para>PID value to filter. Only the TS packets carrying the
- specified PID will be passed to the API client.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t
- callback_length</para>
-</entry><entry
- align="char">
-<para>Number of bytes to deliver with each call to the
- dmx_ts_cb() callback function. The value of this
- parameter should be a multiple of 188.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t
- circular_buffer_size</para>
-</entry><entry
- align="char">
-<para>Size of the circular buffer for the filtered TS packets.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int descramble</para>
-</entry><entry
- align="char">
-<para>If non-zero, descramble the filtered TS packets.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct timespec
- timeout</para>
-</entry><entry
- align="char">
-<para>Maximum time to wait before delivering received TS
- packets to the client.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOMEM</para>
-</entry><entry
- align="char">
-<para>Not enough memory for the requested buffer size.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>No descrambling facility available for TS.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
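A sketch of a client bringing up a TS feed with the functions above; the
allocate_ts_feed() prototype is assumed from the demux API described earlier
in this document, client_ts_cb is the callback sketched under dmx_ts_cb(),
and the PID, buffer size and timeout values are arbitrary:

    static int client_setup_ts_feed(dmx_demux_t *demux)
    {
        dmx_ts_feed_t *feed;
        struct timespec timeout = { 0, 100 * 1000 * 1000 };   /* 100 ms */
        int ret;

        ret = demux->allocate_ts_feed(demux, &feed, client_ts_cb);
        if (ret)
            return ret;

        /* PID 0x100, 10 TS packets (10 * 188 bytes) per callback,
         * 64 KiB circular buffer, no descrambling. */
        ret = feed->set(feed, 0x100, 10 * 188, 64 * 1024, 0, timeout);
        if (ret)
            return ret;

        return feed->start_filtering(feed);
    }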
-</section><section
-role="subsection"><title>start_filtering()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Starts filtering TS packets on this TS feed, according to its settings. The PID
- value to filter can be set by the API client. All matching TS packets are
- delivered asynchronously to the client, using the callback function registered
- with allocate_ts_feed().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int start_filtering(dmx_ts_feed_t&#x22C6; feed);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_ts_feed_t* feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the TS feed API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>stop_filtering()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Stops filtering TS packets on this TS feed.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int stop_filtering(dmx_ts_feed_t&#x22C6; feed);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_ts_feed_t* feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the TS feed API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
- </section></section>
-<section id="section_feed_api">
-<title>Section Feed API</title>
-<para>A section feed is a resource consisting of a PID filter and a set of section filters. Using this
-API, the client can set the properties of a section feed and to start/stop filtering. The API is
-defined as an abstract interface of the type dmx_section_feed_t. The functions that implement
-the interface should be defined static or module private. The client can get the handle of
-a section feed API by calling the function allocate_section_feed() in the demux
-API.
-</para>
-<para>On demux platforms that provide section filtering in hardware, the Section Feed API
-implementation provides a software wrapper for the demux hardware. Other platforms may
-support only PID filtering in hardware, requiring that TS packets are converted to sections in
-software. In the latter case the Section Feed API implementation can be a client of the TS
-Feed API.
-</para>
-
-</section>
-<section id="kdapi_set">
-<title>set()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function sets the parameters of a section feed. Any filtering in progress on
- the section feed must be stopped before calling this function. If descrambling
- is enabled, the payload_scrambling_control and address_scrambling_control
- fields of received DVB datagram sections should be observed. If either one is
- non-zero, the section should be descrambled either in hardware or using the
- functions descramble_mac_address() and descramble_section_payload() of the
- demux API. Note that according to the MPEG-2 Systems specification, only
- the payloads of private sections can be scrambled while the rest of the section
- data must be sent in the clear.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int set(dmx_section_feed_t&#x22C6; feed, __u16 pid, size_t
- circular_buffer_size, int descramble, int
- check_crc);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_section_feed_t*
- feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16 pid</para>
-</entry><entry
- align="char">
-<para>PID value to filter; only the TS packets carrying the
- specified PID will be accepted.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t
- circular_buffer_size</para>
-</entry><entry
- align="char">
-<para>Size of the circular buffer for filtered sections.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int descramble</para>
-</entry><entry
- align="char">
-<para>If non-zero, descramble any sections that are scrambled.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int check_crc</para>
-</entry><entry
- align="char">
-<para>If non-zero, check the CRC values of filtered sections.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOMEM</para>
-</entry><entry
- align="char">
-<para>Not enough memory for the requested buffer size.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSYS</para>
-</entry><entry
- align="char">
-<para>No descrambling facility available for sections.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameters.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>allocate_filter()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function is used to allocate a section filter on the demux. It should only be
- called when no filtering is in progress on this section feed. If a filter cannot be
- allocated, the function fails with -ENOSPC. See in section ?? for the format of
- the section filter.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The bitfields filter_mask and filter_value should only be modified when no
- filtering is in progress on this section feed. filter_mask controls which bits of
- filter_value are compared with the section headers/payload. On a binary value
- of 1 in filter_mask, the corresponding bits are compared. The filter only accepts
- sections that are equal to filter_value in all the tested bit positions. Any changes
- to the values of filter_mask and filter_value are guaranteed to take effect only
- when the start_filtering() function is called next time. The parent pointer in
- the struct is initialized by the API implementation to the value of the feed
- parameter. The priv pointer is not used by the API implementation, and can
- thus be freely utilized by the caller of this function. Any data pointed to by the
- priv pointer is available to the recipient of the dmx_section_cb() function call.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>While the maximum section filter length (DMX_MAX_FILTER_SIZE) is
- currently set at 16 bytes, hardware filters of that size are not available on all
- platforms. Therefore, section filtering will often take place first in hardware,
- followed by filtering in software for the header bytes that were not covered
- by a hardware filter. The filter_mask field can be checked to determine how
- many bytes of the section filter are actually used, and if the hardware filter will
- suffice. Additionally, software-only section filters can optionally be allocated
- to clients when all hardware section filters are in use. Note that on most demux
- hardware it is not possible to filter on the section_length field of the section
- header &#8211; thus this field is ignored, even though it is included in filter_value and
- filter_mask fields.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int allocate_filter(dmx_section_feed_t&#x22C6; feed,
- dmx_section_filter_t&#x22C6;&#x22C6; filter);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_section_feed_t*
- feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_section_filter_t**
- filter</para>
-</entry><entry
- align="char">
-<para>Pointer to the allocated filter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENOSPC</para>
-</entry><entry
- align="char">
-<para>No filters of given type and length available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameters.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
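A sketch of allocating and programming a single section filter on an already
configured section feed; the member names follow the description above,
memset() comes from <linux/string.h>, and the table_id value 0x42 is
arbitrary:

    static int client_setup_table_filter(dmx_section_feed_t *feed)
    {
        dmx_section_filter_t *filter;
        int ret;

        ret = feed->allocate_filter(feed, &filter);
        if (ret)
            return ret;                     /* e.g. -ENOSPC */

        memset(filter->filter_value, 0, DMX_MAX_FILTER_SIZE);
        memset(filter->filter_mask, 0, DMX_MAX_FILTER_SIZE);
        filter->filter_value[0] = 0x42;     /* table_id to accept */
        filter->filter_mask[0]  = 0xff;     /* test all bits of byte 0 */

        /* New value/mask settings take effect on the next
         * start_filtering() call. */
        return feed->start_filtering(feed);
    }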
-</section><section
-role="subsection"><title>release_filter()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This function releases all the resources of a previously allocated section filter.
- The function should not be called while filtering is in progress on this section
- feed. After calling this function, the caller should not try to dereference the
- filter pointer.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int release_filter ( dmx_section_feed_t&#x22C6; feed,
- dmx_section_filter_t&#x22C6; filter);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_section_feed_t*
- feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_section_filter_t*
- filter</para>
-</entry><entry
- align="char">
-<para>I/O Pointer to the instance data of a section filter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-ENODEV</para>
-</entry><entry
- align="char">
-<para>No such filter allocated.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>start_filtering()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Starts filtering sections on this section feed, according to its settings. Sections
- are first filtered based on their PID and then matched with the section
- filters allocated for this feed. If the section matches the PID filter and
- at least one section filter, it is delivered to the API client. The section
- is delivered asynchronously using the callback function registered with
- allocate_section_feed().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int start_filtering ( dmx_section_feed_t&#x22C6; feed );</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_section_feed_t*
- feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section
-role="subsection"><title>stop_filtering()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>Stops filtering sections on this section feed. Note that any changes to the
- filtering parameters (filter_value, filter_mask, etc.) should only be made when
- filtering is stopped.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int stop_filtering ( dmx_section_feed_t&#x22C6; feed );</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>dmx_section_feed_t*
- feed</para>
-</entry><entry
- align="char">
-<para>Pointer to the section feed API and instance data.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURNS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>0</para>
-</entry><entry
- align="char">
-<para>The function was completed without errors.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>-EINVAL</para>
-</entry><entry
- align="char">
-<para>Bad parameter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
diff --git a/Documentation/DocBook/media/v4l/biblio.xml b/Documentation/DocBook/media/v4l/biblio.xml
index fdee6b3f3eca..9beb30f0071b 100644
--- a/Documentation/DocBook/media/v4l/biblio.xml
+++ b/Documentation/DocBook/media/v4l/biblio.xml
@@ -177,6 +177,24 @@ Signal - NTSC for Studio Applications"</title>
1125-Line High-Definition Production"</title>
</biblioentry>
+ <biblioentry id="smpte431">
+ <abbrev>SMPTE&nbsp;RP&nbsp;431-2</abbrev>
+ <authorgroup>
+ <corpauthor>Society of Motion Picture and Television Engineers
+(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
+ </authorgroup>
+ <title>SMPTE RP 431-2:2011 "D-Cinema Quality - Reference Projector and Environment"</title>
+ </biblioentry>
+
+ <biblioentry id="smpte2084">
+ <abbrev>SMPTE&nbsp;ST&nbsp;2084</abbrev>
+ <authorgroup>
+ <corpauthor>Society of Motion Picture and Television Engineers
+(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
+ </authorgroup>
+ <title>SMPTE ST 2084:2014 "High Dynamic Range Electro-Optical Transfer Function of Master Reference Displays"</title>
+ </biblioentry>
+
<biblioentry id="srgb">
<abbrev>sRGB</abbrev>
<authorgroup>
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index a0aef85d33c1..5701a08ed792 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2591,6 +2591,26 @@ and &v4l2-mbus-framefmt;.
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 4.4</title>
+ <orderedlist>
+ <listitem>
+ <para>Renamed <constant>V4L2_TUNER_ADC</constant> to
+<constant>V4L2_TUNER_SDR</constant>. The use of
+<constant>V4L2_TUNER_ADC</constant> is deprecated now.
+ </para>
+ </listitem>
+ <listitem>
+ <para>Added <constant>V4L2_CID_RF_TUNER_RF_GAIN</constant>
+RF Tuner control.</para>
+ </listitem>
+ <listitem>
+ <para>Added transmitter support for Software Defined Radio (SDR)
+Interface.</para>
+ </listitem>
+ </orderedlist>
+ </section>
+
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 33aece541880..f13a429093f1 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -5418,6 +5418,18 @@ set. Unit is in Hz. The range and step are driver-specific.</entry>
<entry spanname="descr">Enables/disables IF automatic gain control (AGC)</entry>
</row>
<row>
+ <entry spanname="id"><constant>V4L2_CID_RF_TUNER_RF_GAIN</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">The RF amplifier is the very first
+amplifier on the receiver signal path, just right after the antenna input.
+The difference between the LNA gain and the RF gain in this document is that
+the LNA gain is integrated in the tuner chip while the RF gain is a separate
+chip. There may be both RF and LNA gain controls in the same device.
+The range and step are driver-specific.</entry>
+ </row>
+ <row>
<entry spanname="id"><constant>V4L2_CID_RF_TUNER_LNA_GAIN</constant>&nbsp;</entry>
<entry>integer</entry>
</row>
@@ -5425,6 +5437,8 @@ set. Unit is in Hz. The range and step are driver-specific.</entry>
<entry spanname="descr">LNA (low noise amplifier) gain is first
gain stage on the RF tuner signal path. It is located very close to tuner
antenna input. Used when <constant>V4L2_CID_RF_TUNER_LNA_GAIN_AUTO</constant> is not set.
+See <constant>V4L2_CID_RF_TUNER_RF_GAIN</constant> to understand how the RF gain
+and the LNA gain differ from each other.
The range and step are driver-specific.</entry>
</row>
<row>
diff --git a/Documentation/DocBook/media/v4l/dev-sdr.xml b/Documentation/DocBook/media/v4l/dev-sdr.xml
index f8903568a243..a659771f7b7c 100644
--- a/Documentation/DocBook/media/v4l/dev-sdr.xml
+++ b/Documentation/DocBook/media/v4l/dev-sdr.xml
@@ -28,6 +28,16 @@ Devices supporting the SDR receiver interface set the
<structfield>capabilities</structfield> field of &v4l2-capability;
returned by the &VIDIOC-QUERYCAP; ioctl. That flag means the device has an
Analog to Digital Converter (ADC), which is a mandatory element for the SDR receiver.
+ </para>
+ <para>
+Devices supporting the SDR transmitter interface set the
+<constant>V4L2_CAP_SDR_OUTPUT</constant> and
+<constant>V4L2_CAP_MODULATOR</constant> flags in the
+<structfield>capabilities</structfield> field of &v4l2-capability;
+returned by the &VIDIOC-QUERYCAP; ioctl. Those flags mean the device has a
+Digital to Analog Converter (DAC), which is a mandatory element for the SDR transmitter.
+ </para>
+ <para>
At least one of the read/write, streaming or asynchronous I/O methods must
be supported.
</para>
@@ -39,15 +49,16 @@ be supported.
<para>
SDR devices can support <link linkend="control">controls</link>, and must
support the <link linkend="tuner">tuner</link> ioctls. Tuner ioctls are used
-for setting the ADC sampling rate (sampling frequency) and the possible RF tuner
-frequency.
+for setting the ADC/DAC sampling rate (sampling frequency) and the possible
+radio frequency (RF).
</para>
<para>
-The <constant>V4L2_TUNER_ADC</constant> tuner type is used for ADC tuners, and
-the <constant>V4L2_TUNER_RF</constant> tuner type is used for RF tuners. The
-tuner index of the RF tuner (if any) must always follow the ADC tuner index.
-Normally the ADC tuner is #0 and the RF tuner is #1.
+The <constant>V4L2_TUNER_SDR</constant> tuner type is used for setting the SDR
+device ADC/DAC sampling frequency, and the <constant>V4L2_TUNER_RF</constant>
+tuner type is used for setting the radio frequency.
+The tuner index of the RF tuner (if any) must always follow the SDR tuner index.
+Normally the SDR tuner is #0 and the RF tuner is #1.
</para>
<para>
@@ -59,9 +70,9 @@ The &VIDIOC-S-HW-FREQ-SEEK; ioctl is not supported.
<title>Data Format Negotiation</title>
<para>
-The SDR capture device uses the <link linkend="format">format</link> ioctls to
-select the capture format. Both the sampling resolution and the data streaming
-format are bound to that selectable format. In addition to the basic
+The SDR device uses the <link linkend="format">format</link> ioctls to
+select the capture and output format. Both the sampling resolution and the data
+streaming format are bound to that selectable format. In addition to the basic
<link linkend="format">format</link> ioctls, the &VIDIOC-ENUM-FMT; ioctl
must be supported as well.
</para>
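A user-space sketch of the tuner layout described above, assuming 1 Hz
frequency units (driver-specific in general) and with error handling
trimmed; the sampling rate and RF frequency values are arbitrary:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int sdr_tune(int fd)
    {
        struct v4l2_capability cap;
        struct v4l2_frequency freq;

        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
            return -1;
        if (!(cap.capabilities &
              (V4L2_CAP_SDR_CAPTURE | V4L2_CAP_SDR_OUTPUT)))
            return -1;                      /* not an SDR device */

        memset(&freq, 0, sizeof(freq));
        freq.tuner = 0;                     /* SDR (ADC/DAC) tuner */
        freq.type = V4L2_TUNER_SDR;
        freq.frequency = 2048000;           /* e.g. 2.048 MS/s sampling rate */
        if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
            return -1;

        freq.tuner = 1;                     /* RF tuner, if present */
        freq.type = V4L2_TUNER_RF;
        freq.frequency = 100000000;         /* e.g. 100 MHz */
        return ioctl(fd, VIDIOC_S_FREQUENCY, &freq);
    }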
@@ -69,7 +80,8 @@ must be supported as well.
<para>
To use the <link linkend="format">format</link> ioctls applications set the
<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_SDR_CAPTURE</constant> and use the &v4l2-sdr-format;
+<constant>V4L2_BUF_TYPE_SDR_CAPTURE</constant> or
+<constant>V4L2_BUF_TYPE_SDR_OUTPUT</constant> and use the &v4l2-sdr-format;
<structfield>sdr</structfield> member of the <structfield>fmt</structfield>
union as needed per the desired operation.
Currently there is two fields, <structfield>pixelformat</structfield> and
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index 7bbc2a48911e..da654031ef3f 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -1006,8 +1006,14 @@ must set this to 0.</entry>
<row>
<entry><constant>V4L2_BUF_TYPE_SDR_CAPTURE</constant></entry>
<entry>11</entry>
- <entry>Buffer for Software Defined Radio (SDR), see <xref
- linkend="sdr" />.</entry>
+ <entry>Buffer for Software Defined Radio (SDR) capture stream, see
+ <xref linkend="sdr" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BUF_TYPE_SDR_OUTPUT</constant></entry>
+ <entry>12</entry>
+ <entry>Buffer for Software Defined Radio (SDR) output stream, see
+ <xref linkend="sdr" />.</entry>
</row>
</tbody>
</tgroup>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index 965ea916784a..d871245d2973 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -540,6 +540,10 @@ colorspaces except for BT.2020 which uses limited range R'G'B' quantization.</pa
<entry>See <xref linkend="col-bt2020" />.</entry>
</row>
<row>
+ <entry><constant>V4L2_COLORSPACE_DCI_P3</constant></entry>
+ <entry>See <xref linkend="col-dcip3" />.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_COLORSPACE_SMPTE240M</constant></entry>
<entry>See <xref linkend="col-smpte-240m" />.</entry>
</row>
@@ -601,6 +605,14 @@ colorspaces except for BT.2020 which uses limited range R'G'B' quantization.</pa
<entry><constant>V4L2_XFER_FUNC_NONE</constant></entry>
<entry>Do not use a transfer function (i.e. use linear RGB values).</entry>
</row>
+ <row>
+ <entry><constant>V4L2_XFER_FUNC_DCI_P3</constant></entry>
+ <entry>Use the DCI-P3 transfer function.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_XFER_FUNC_SMPTE2084</constant></entry>
+ <entry>Use the SMPTE 2084 transfer function.</entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -1154,6 +1166,68 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
clamped to the range [-0.5&hellip;0.5]. The Yc'CbcCrc quantization is limited range.</para>
</section>
+ <section id="col-dcip3">
+ <title>Colorspace DCI-P3 (<constant>V4L2_COLORSPACE_DCI_P3</constant>)</title>
+ <para>The <xref linkend="smpte431" /> standard defines the colorspace used by cinema
+projectors that use the DCI-P3 colorspace.
+The default transfer function is <constant>V4L2_XFER_FUNC_DCI_P3</constant>.
+The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_709</constant>. Note that this
+colorspace does not specify a Y'CbCr encoding since it is not meant to be encoded
+to Y'CbCr. So this default Y'CbCr encoding was picked because it is the HDTV
+encoding. The default Y'CbCr quantization is limited range. The chromaticities of
+the primary colors and the white reference are:</para>
+ <table frame="none">
+ <title>DCI-P3 Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.6800</entry>
+ <entry>0.3200</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.2650</entry>
+ <entry>0.6900</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.1500</entry>
+ <entry>0.0600</entry>
+ </row>
+ <row>
+ <entry>White Reference</entry>
+ <entry>0.3140</entry>
+ <entry>0.3510</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <variablelist>
+ <varlistentry>
+ <term>Transfer function:</term>
+ <listitem>
+ <para>L' = L<superscript>1/2.6</superscript></para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = L'<superscript>2.6</superscript></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y'CbCr encoding is not specified. V4L2 defaults to Rec. 709.</para>
+ </section>
+
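The same transfer function written out as small C helpers, with L and L'
normalized to [0, 1]; this is only a reference sketch, not kernel code:

    #include <math.h>

    static double dcip3_encode(double L)  { return pow(L, 1.0 / 2.6); }
    static double dcip3_decode(double Lp) { return pow(Lp, 2.6); }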
<section id="col-smpte-240m">
<title>Colorspace SMPTE 240M (<constant>V4L2_COLORSPACE_SMPTE240M</constant>)</title>
<para>The <xref linkend="smpte240m" /> standard was an interim standard used during
@@ -1402,6 +1476,41 @@ and <constant>V4L2_QUANTIZATION_FULL_RANGE</constant>.</para>
</section>
+ <section>
+ <title>Detailed Transfer Function Descriptions</title>
+ <section id="xf-smpte-2084">
+ <title>Transfer Function SMPTE 2084 (<constant>V4L2_XFER_FUNC_SMPTE2084</constant>)</title>
+ <para>The <xref linkend="smpte2084" /> standard defines the transfer function used by
+High Dynamic Range content.</para>
+ <variablelist>
+ <varlistentry>
+ <term>Constants:</term>
+ <listitem>
+ <para>m1 = (2610 / 4096) / 4</para>
+ <para>m2 = (2523 / 4096) * 128</para>
+ <para>c1 = 3424 / 4096</para>
+ <para>c2 = (2413 / 4096) * 32</para>
+ <para>c3 = (2392 / 4096) * 32</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Transfer function:</term>
+ <listitem>
+ <para>L' = ((c1 + c2 * L<superscript>m1</superscript>) / (1 + c3 * L<superscript>m1</superscript>))<superscript>m2</superscript></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = (max(L'<superscript>1/m2</superscript> - c1, 0) / (c2 - c3 * L'<superscript>1/m2</superscript>))<superscript>1/m1</superscript></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
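The same constants and formulas written out as C helpers, with L and L'
normalized to [0, 1]; this is only a reference sketch, not kernel code:

    #include <math.h>

    static const double m1 = (2610.0 / 4096.0) / 4.0;    /* 0.1593... */
    static const double m2 = (2523.0 / 4096.0) * 128.0;  /* 78.84375 */
    static const double c1 = 3424.0 / 4096.0;            /* 0.8359375 */
    static const double c2 = (2413.0 / 4096.0) * 32.0;   /* 18.8515625 */
    static const double c3 = (2392.0 / 4096.0) * 32.0;   /* 18.6875 */

    static double smpte2084_encode(double L)
    {
        double Lm1 = pow(L, m1);

        return pow((c1 + c2 * Lm1) / (1.0 + c3 * Lm1), m2);
    }

    static double smpte2084_decode(double Lp)
    {
        double Lp1m2 = pow(Lp, 1.0 / m2);

        return pow(fmax(Lp1m2 - c1, 0.0) / (c2 - c3 * Lp1m2), 1.0 / m1);
    }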
+ </section>
+
<section id="pixfmt-indexed">
<title>Indexed Format</title>
@@ -1623,7 +1732,7 @@ extended control <constant>V4L2_CID_MPEG_STREAM_TYPE</constant>, see
<section id="sdr-formats">
<title>SDR Formats</title>
- <para>These formats are used for <link linkend="sdr">SDR Capture</link>
+ <para>These formats are used for <link linkend="sdr">SDR</link>
interface only.</para>
&sub-sdr-cu08;
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index e98caa1c39bd..7e61643358de 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -151,9 +151,18 @@ Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab,
structs, ioctls) must be noted in more detail in the history chapter
(compat.xml), along with the possible impact on existing drivers and
applications. -->
+ <revision>
+ <revnumber>4.4</revnumber>
+ <date>2015-05-26</date>
+ <authorinitials>ap</authorinitials>
+ <revremark>Renamed V4L2_TUNER_ADC to V4L2_TUNER_SDR.
+Added V4L2_CID_RF_TUNER_RF_GAIN control.
+Added transmitter support for Software Defined Radio (SDR) Interface.
+ </revremark>
+ </revision>
<revision>
- <revnumber>3.21</revnumber>
+ <revnumber>4.1</revnumber>
<date>2015-02-13</date>
<authorinitials>mcc</authorinitials>
<revremark>Fix documentation for media controller device nodes and add support for DVB device nodes.
@@ -557,7 +566,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.19</subtitle>
+ <subtitle>Revision 4.4</subtitle>
<chapter id="common">
&sub-common;
diff --git a/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml b/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml
index fc1d4625a78c..70a4a08e9404 100644
--- a/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml
@@ -130,7 +130,7 @@ encoding will continue until the end of the current <wordasword>Group
Of Pictures</wordasword>, otherwise encoding will stop immediately.
When the encoder is already stopped, this command does
nothing. mem2mem encoders will send a <constant>V4L2_EVENT_EOS</constant> event
-when the last frame has been decoded and all frames are ready to be dequeued and
+when the last frame has been encoded and all frames are ready to be dequeued and
will set the <constant>V4L2_BUF_FLAG_LAST</constant> buffer flag on the last
buffer of the capture queue to indicate there will be no new buffers produced to
dequeue. This buffer may be empty, indicated by the driver setting the
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index c5bdbfcc42b3..842536aae8b4 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -200,6 +200,13 @@ Valid if this control is of type <constant>V4L2_CTRL_TYPE_U16</constant>.</entry
</row>
<row>
<entry></entry>
+ <entry>__u32 *</entry>
+ <entry><structfield>p_u32</structfield></entry>
+ <entry>A pointer to a matrix control of unsigned 32-bit values.
+Valid if this control is of type <constant>V4L2_CTRL_TYPE_U32</constant>.</entry>
+ </row>
+ <row>
+ <entry></entry>
<entry>void *</entry>
<entry><structfield>ptr</structfield></entry>
<entry>A pointer to a compound type which can be an N-dimensional array and/or a
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
index 4fe19a7a9a31..ffcb448251f0 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
@@ -175,7 +175,7 @@ capture and output devices.</entry>
<entry>&v4l2-sdr-format;</entry>
<entry><structfield>sdr</structfield></entry>
<entry>Definition of a data format, see
-<xref linkend="pixfmt" />, used by SDR capture devices.</entry>
+<xref linkend="pixfmt" />, used by SDR capture and output devices.</entry>
</row>
<row>
<entry></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml b/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml
index 7068b599a00d..96e17b344c5d 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml
@@ -78,6 +78,12 @@ different audio modulation if the request cannot be satisfied. However
this is a write-only ioctl, it does not return the actual audio
modulation selected.</para>
+ <para><link linkend="sdr">SDR</link> specific modulator types are
+<constant>V4L2_TUNER_SDR</constant> and <constant>V4L2_TUNER_RF</constant>.
+For SDR devices the <structfield>txsubchans</structfield> field must be
+initialized to zero.
+The term 'modulator' means SDR transmitter in this context.</para>
+
<para>To change the radio frequency the &VIDIOC-S-FREQUENCY; ioctl
is available.</para>
@@ -140,7 +146,13 @@ indicator, for example a stereo pilot tone.</entry>
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry spanname="hspan">Type of the modulator, see <xref
+ linkend="v4l2-tuner-type" />.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[3]</entry>
<entry>Reserved for future extensions. Drivers and
applications must set the array to zero.</entry>
</row>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
index b0d865933da6..459b7e561f3c 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
@@ -80,6 +80,12 @@ if the requested mode is invalid or unsupported. Since this is a
<!-- FIXME -->write-only ioctl, it does not return the actually
selected audio mode.</para>
+ <para><link linkend="sdr">SDR</link> specific tuner types are
+<constant>V4L2_TUNER_SDR</constant> and <constant>V4L2_TUNER_RF</constant>.
+For SDR devices the <structfield>audmode</structfield> field must be
+initialized to zero.
+The term 'tuner' means SDR receiver in this context.</para>
+
<para>To change the radio frequency the &VIDIOC-S-FREQUENCY; ioctl
is available.</para>
@@ -261,6 +267,16 @@ applications must set the array to zero.</entry>
<entry>2</entry>
<entry></entry>
</row>
+ <row>
+ <entry><constant>V4L2_TUNER_SDR</constant></entry>
+ <entry>4</entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_TUNER_RF</constant></entry>
+ <entry>5</entry>
+ <entry></entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querycap.xml b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
index 20fda75a012d..cd82148dedd7 100644
--- a/Documentation/DocBook/media/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
@@ -308,6 +308,12 @@ modulator programming see
fields.</entry>
</row>
<row>
+ <entry><constant>V4L2_CAP_SDR_OUTPUT</constant></entry>
+ <entry>0x00400000</entry>
+ <entry>The device supports the
+<link linkend="sdr">SDR Output</link> interface.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CAP_READWRITE</constant></entry>
<entry>0x01000000</entry>
<entry>The device supports the <link
diff --git a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
index 6ec39c698baf..55b7582cf314 100644
--- a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
@@ -101,8 +101,9 @@ prematurely end the enumeration).</para></footnote></para>
next supported non-compound control, or <errorcode>EINVAL</errorcode>
if there is none. In addition, the <constant>V4L2_CTRL_FLAG_NEXT_COMPOUND</constant>
flag can be specified to enumerate all compound controls (i.e. controls
-with type &ge; <constant>V4L2_CTRL_COMPOUND_TYPES</constant>). Specify both
-<constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> and
+with type &ge; <constant>V4L2_CTRL_COMPOUND_TYPES</constant> and/or array
+controls, in other words controls that contain more than one value).
+Specify both <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> and
<constant>V4L2_CTRL_FLAG_NEXT_COMPOUND</constant> in order to enumerate
all controls, compound or not. Drivers which do not support these flags yet
always return <errorcode>EINVAL</errorcode>.</para>
@@ -422,7 +423,7 @@ the array to zero.</entry>
<entry>any</entry>
<entry>An integer-valued control ranging from minimum to
maximum inclusive. The step value indicates the increment between
-values which are actually different on the hardware.</entry>
+values.</entry>
</row>
<row>
<entry><constant>V4L2_CTRL_TYPE_BOOLEAN</constant></entry>
@@ -518,7 +519,7 @@ Older drivers which do not support this feature return an
<entry>any</entry>
<entry>An unsigned 8-bit valued control ranging from minimum to
maximum inclusive. The step value indicates the increment between
-values which are actually different on the hardware.
+values.
</entry>
</row>
<row>
@@ -528,7 +529,17 @@ values which are actually different on the hardware.
<entry>any</entry>
<entry>An unsigned 16-bit valued control ranging from minimum to
maximum inclusive. The step value indicates the increment between
-values which are actually different on the hardware.
+values.
+</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CTRL_TYPE_U32</constant></entry>
+ <entry>any</entry>
+ <entry>any</entry>
+ <entry>any</entry>
+ <entry>An unsigned 32-bit valued control ranging from minimum to
+maximum inclusive. The step value indicates the increment between
+values.
</entry>
</row>
</tbody>
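A user-space sketch of setting such an unsigned 32-bit array control through
the p_u32 pointer of the extended control interface; the control id and
element count are caller-supplied and purely illustrative:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_u32_array(int fd, __u32 cid, __u32 *values, __u32 nelems)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = cid;
        ctrl.size = nelems * sizeof(__u32);   /* payload size in bytes */
        ctrl.p_u32 = values;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(cid);
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }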
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index f3f5fe5b64c9..92037033f5eb 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -38,7 +38,7 @@
<title>LINUX MEDIA INFRASTRUCTURE API</title>
<copyright>
- <year>2009-2014</year>
+ <year>2009-2015</year>
<holder>LinuxTV Developers</holder>
</copyright>
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index fd89b04d34f0..4710e4afef19 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -659,8 +659,8 @@ succinct and descriptive, but that is what a well-written summary
should do.
The "summary phrase" may be prefixed by tags enclosed in square
-brackets: "Subject: [PATCH tag] <summary phrase>". The tags are not
-considered part of the summary phrase, but describe how the patch
+brackets: "Subject: [PATCH <tag>...] <summary phrase>". The tags are
+not considered part of the summary phrase, but describe how the patch
should be treated. Common tags might include a version descriptor if
the multiple versions of the patch have been sent out in response to
comments (i.e., "v1, v2, v3"), or "RFC" to indicate a request for
@@ -672,8 +672,8 @@ the patch series.
A couple of example Subjects:
- Subject: [patch 2/5] ext2: improve scalability of bitmap searching
- Subject: [PATCHv2 001/207] x86: fix eflags tracking
+ Subject: [PATCH 2/5] ext2: improve scalability of bitmap searching
+ Subject: [PATCH v2 01/27] x86: fix eflags tracking
The "from" line must be the very first line in the message body,
and has the form:
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 62435bb25266..5bda5031c83d 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -14,8 +14,43 @@ Statistics for individual zram devices are exported through sysfs nodes at
* Usage
+There are several ways to configure and manage zram devices:
+a) using zram and zram_control sysfs attributes
+b) using the zramctl utility provided by util-linux (util-linux@vger.kernel.org).
+
+In this document we will describe only 'manual' zram configuration steps,
+IOW, zram and zram_control sysfs attributes.
+
+In order to get a better idea about zramctl please consult the util-linux
+documentation, the zramctl man page or `zramctl --help'. Please note that
+the zram maintainers do not develop/maintain util-linux or zramctl; should
+you have any questions please contact util-linux@vger.kernel.org
+
Following shows a typical sequence of steps for using zram.
+WARNING
+=======
+For the sake of simplicity we skip error checking parts in most of the
+examples below. However, it is your sole responsibility to handle errors.
+
+zram sysfs attributes always return negative values in case of errors.
+The list of possible return codes:
+-EBUSY -- an attempt to modify an attribute that cannot be changed once
+the device has been initialised. Please reset the device first;
+-ENOMEM -- zram was not able to allocate enough memory to fulfil your
+needs;
+-EINVAL -- invalid input has been provided.
+
+If you use 'echo', the return value is set by the 'echo' utility, and,
+in the general case, something like:
+
+	echo 3 > /sys/block/zram0/max_comp_streams
+	if [ $? -ne 0 ]; then
+		handle_error
+	fi
+
+should suffice.
+
1) Load Module:
modprobe zram num_devices=4
This creates 4 devices: /dev/zram{0,1,2,3}
@@ -47,7 +82,7 @@ max_comp_streams adjustment.
3) Select compression algorithm
Using comp_algorithm device attribute one can see available and
- currently selected (shown in square brackets) compression algortithms,
+ currently selected (shown in square brackets) compression algorithms,
change selected compression algorithm (once the device is initialised
there is no way to change compression algorithm).
@@ -119,7 +154,7 @@ execute
8) Stats:
Per-device statistics are exported as various nodes under /sys/block/zram<id>/
-A brief description of exported device attritbutes. For more details please
+A brief description of exported device attributes. For more details please
read Documentation/ABI/testing/sysfs-block-zram.
Name access description
@@ -140,8 +175,9 @@ zero_pages RO the number of zero filled pages written to this disk
orig_data_size RO uncompressed size of data stored in this disk
compr_data_size RO compressed size of data stored in this disk
mem_used_total RO the amount of memory allocated for this disk
-mem_used_max RW the maximum amount memory zram have consumed to
- store compressed data
+mem_used_max RW the maximum amount of memory zram has consumed to
+ store the data (to reset this counter to the actual
+ current value, write 1 to this attribute)
mem_limit RW the maximum amount of memory ZRAM can use to store
the compressed data
pages_compacted RO the number of pages freed during compaction
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 12686bec37b9..52fa9f353342 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -59,7 +59,7 @@ cgroups. Here is what you can do.
- At macro level, first dd should finish first. To get more precise data, keep
on looking at (with the help of script), at blkio.disk_time and
blkio.disk_sectors files of both test1 and test2 groups. This will tell how
- much disk time (in milli seconds), each group got and how many secotors each
+ much disk time (in milliseconds), each group got and how many sectors each
group dispatched to the disk. We provide fairness in terms of disk time, so
ideally io.disk_time of cgroups should be in proportion to the weight.
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index f935fac1e73b..c6256ae9885b 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -637,6 +637,10 @@ void exit(struct task_struct *task)
Called during task exit.
+void free(struct task_struct *task)
+
+Called when the task_struct is freed.
+
void bind(struct cgroup *root)
(cgroup_mutex held by caller)
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index c96a72cbb30a..e831cb2b8394 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -50,7 +50,7 @@ being frozen. This allows the bash example above and gdb to run as
expected.
The cgroup freezer is hierarchical. Freezing a cgroup freezes all
-tasks beloning to the cgroup and all its descendant cgroups. Each
+tasks belonging to the cgroup and all its descendant cgroups. Each
cgroup has its own state (self-state) and the state inherited from the
parent (parent-state). Iff both states are THAWED, the cgroup is
THAWED.
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index e0975c2cf03d..781b1d475bcf 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -107,12 +107,6 @@ root of unified hierarchy can be bound to other hierarchies. This
allows mixing unified hierarchy with the traditional multiple
hierarchies in a fully backward compatible way.
-For development purposes, the following boot parameter makes all
-controllers to appear on the unified hierarchy whether supported or
-not.
-
- cgroup__DEVEL__legacy_files_on_dfl
-
A controller can be moved across hierarchies only after the controller
is no longer referenced in its current hierarchy. Because per-cgroup
controller states are destroyed asynchronously and controllers may
@@ -341,11 +335,11 @@ is riddled with issues.
unnecessarily complicated and probably done this way because event
delivery itself was expensive.
-Unified hierarchy implements an interface file "cgroup.populated"
-which can be used to monitor whether the cgroup's subhierarchy has
-tasks in it or not. Its value is 0 if there is no task in the cgroup
-and its descendants; otherwise, 1. poll and [id]notify events are
-triggered when the value changes.
+Unified hierarchy implements "populated" field in "cgroup.events"
+interface file which can be used to monitor whether the cgroup's
+subhierarchy has tasks in it or not. Its value is 0 if there is no
+task in the cgroup and its descendants; otherwise, 1. poll and
+[id]notify events are triggered when the value changes.
This is significantly lighter and simpler and trivially allows
delegating management of subhierarchy - subhierarchy monitoring can
@@ -374,6 +368,10 @@ supported and the interface files "release_agent" and
- The "cgroup.clone_children" file is removed.
+- /proc/PID/cgroup keeps reporting the cgroup that a zombie belonged
+ to before exiting. If the cgroup is removed before the zombie is
+ reaped, " (deleted)" is appeneded to the path.
+
5-3. Controller File Conventions
@@ -435,6 +433,11 @@ may be specified in any order and not all pairs have to be specified.
the first entry in the file. Specific entries can use "default" as
its value to indicate inheritance of the default value.
+- For events which are not very high frequency, an interface file
+ "events" should be created which lists event key value pairs.
+ Whenever a notifiable event happens, file modified event should be
+ generated on the file.
+
5-4. Per-Controller Changes
@@ -491,7 +494,7 @@ may be specified in any order and not all pairs have to be specified.
${R|W}BPS are read/write bytes per second and ${R|W}IOPS are
read/write IOs per second. "max" indicates no limit. Writing
to the file follows the same format but the individual
- settings may be ommitted or specified in any order.
+ settings may be omitted or specified in any order.
This file is available only on non-root cgroups.
diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt
index b7675904a747..8c07e0ea6bc0 100644
--- a/Documentation/crypto/asymmetric-keys.txt
+++ b/Documentation/crypto/asymmetric-keys.txt
@@ -186,7 +186,7 @@ and looks like the following:
const struct public_key_signature *sig);
};
-Asymmetric keys point to this with their type_data[0] member.
+Asymmetric keys point to this with their payload[asym_subtype] member.
The owner and name fields should be set to the owning module and the name of
the subtype. Currently, the name is only used for print statements.
@@ -269,8 +269,7 @@ mandatory:
struct key_preparsed_payload {
char *description;
- void *type_data[2];
- void *payload;
+ void *payload[4];
const void *data;
size_t datalen;
size_t quotalen;
@@ -283,16 +282,18 @@ mandatory:
not theirs.
If the parser is happy with the blob, it should propose a description for
- the key and attach it to ->description, ->type_data[0] should be set to
- point to the subtype to be used, ->payload should be set to point to the
- initialised data for that subtype, ->type_data[1] should point to a hex
- fingerprint and quotalen should be updated to indicate how much quota this
- key should account for.
-
- When clearing up, the data attached to ->type_data[1] and ->description
- will be kfree()'d and the data attached to ->payload will be passed to the
- subtype's ->destroy() method to be disposed of. A module reference for
- the subtype pointed to by ->type_data[0] will be put.
+ the key and attach it to ->description, ->payload[asym_subtype] should be
+ set to point to the subtype to be used, ->payload[asym_crypto] should be
+ set to point to the initialised data for that subtype,
+ ->payload[asym_key_ids] should point to one or more hex fingerprints and
+ quotalen should be updated to indicate how much quota this key should
+ account for.
+
+ When clearing up, the data attached to ->payload[asym_key_ids] and
+ ->description will be kfree()'d and the data attached to
+ ->payload[asym_crypto] will be passed to the subtype's ->destroy() method
+ to be disposed of. A module reference for the subtype pointed to by
+ ->payload[asym_subtype] will be put.
If the data format is not recognised, -EBADMSG should be returned. If it
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
new file mode 100644
index 000000000000..b1f2ce17dff8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
@@ -0,0 +1,22 @@
+Mediatek imgsys controller
+============================
+
+The Mediatek imgsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt8173-imgsys", "syscon"
+- #clock-cells: Must be 1
+
+The imgsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+imgsys: clock-controller@15000000 {
+ compatible = "mediatek,mt8173-imgsys", "syscon";
+ reg = <0 0x15000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
new file mode 100644
index 000000000000..4385946eadef
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
@@ -0,0 +1,22 @@
+Mediatek mmsys controller
+============================
+
+The Mediatek mmsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt8173-mmsys", "syscon"
+- #clock-cells: Must be 1
+
+The mmsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+mmsys: clock-controller@14000000 {
+ compatible = "mediatek,mt8173-mmsys", "syscon";
+ reg = <0 0x14000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
new file mode 100644
index 000000000000..1faacf1c1b25
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
@@ -0,0 +1,22 @@
+Mediatek vdecsys controller
+============================
+
+The Mediatek vdecsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt8173-vdecsys", "syscon"
+- #clock-cells: Must be 1
+
+The vdecsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+vdecsys: clock-controller@16000000 {
+ compatible = "mediatek,mt8173-vdecsys", "syscon";
+ reg = <0 0x16000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt
new file mode 100644
index 000000000000..3cc299fd7857
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt
@@ -0,0 +1,22 @@
+Mediatek vencltsys controller
+============================
+
+The Mediatek vencltsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt8173-vencltsys", "syscon"
+- #clock-cells: Must be 1
+
+The vencltsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+vencltsys: clock-controller@19000000 {
+ compatible = "mediatek,mt8173-vencltsys", "syscon";
+ reg = <0 0x19000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
new file mode 100644
index 000000000000..5bb2866a2b50
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
@@ -0,0 +1,22 @@
+Mediatek vencsys controller
+============================
+
+The Mediatek vencsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt8173-vencsys", "syscon"
+- #clock-cells: Must be 1
+
+The vencsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+vencsys: clock-controller@18000000 {
+ compatible = "mediatek,mt8173-vencsys", "syscon";
+ reg = <0 0x18000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
new file mode 100644
index 000000000000..032a7606b862
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
@@ -0,0 +1,21 @@
+Binding for Freescale QorIQ AHCI SATA Controller
+
+Required properties:
+ - reg: Physical base address and size of the controller's register area.
+ - compatible: Compatibility string. Must be 'fsl,<chip>-ahci', where
+ chip could be ls1021a, ls2080a, ls1043a etc.
+ - clocks: Input clock specifier. Refer to common clock bindings.
+ - interrupts: Interrupt specifier. Refer to interrupt binding.
+
+Optional properties:
+ - dma-coherent: Enable AHCI coherent DMA operation.
+ - reg-names: register area names when there is more than one register area.
+
+Examples:
+ sata@3200000 {
+ compatible = "fsl,ls1021a-ahci";
+ reg = <0x0 0x3200000 0x0 0x10000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&platform_clk 1>;
+ dma-coherent;
+ };
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index a2321819e7f5..c2340eeeb97f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -16,8 +16,6 @@ Required properties:
- "snps,dwc-ahci"
- "snps,exynos5440-ahci"
- "snps,spear-ahci"
- - "fsl,qoriq-ahci" : for qoriq series socs which include ls1021, ls2085, etc.
- - "fsl,<chip>-ahci" : chip could be ls1021, ls2085 etc.
- "generic-ahci"
- interrupts : <interrupt mapping for SATA IRQ>
- reg : <registers mapping>
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index 5ba6450693b9..181bc8ac4e3a 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -77,6 +77,9 @@ Required properties:
"atmel,sama5d4-clk-h32mx":
at91 h32mx clock
+ "atmel,sama5d2-clk-generated":
+ at91 generated clock
+
Required properties for SCKC node:
- reg : defines the IO memory reserved for the SCKC.
- #size-cells : shall be 0 (reg is used to encode clk id).
@@ -461,3 +464,35 @@ For example:
compatible = "atmel,sama5d4-clk-h32mx";
clocks = <&mck>;
};
+
+Required properties for generated clocks:
+- #size-cells : shall be 0 (reg is used to encode clk id).
+- #address-cells : shall be 1 (reg is used to encode clk id).
+- clocks : shall be the generated clock source phandles.
+ e.g. clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>;
+- name: device tree node describing a specific generated clock.
+ * #clock-cells : from common clock binding; shall be set to 0.
+ * reg: peripheral id. See Atmel's datasheets to get a full
+ list of peripheral ids.
+ * atmel,clk-output-range : minimum and maximum clock frequency
+ (two u32 fields).
+
+For example:
+ gck {
+ compatible = "atmel,sama5d2-clk-generated";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>;
+
+ tcb0_gclk: tcb0_gclk {
+ #clock-cells = <0>;
+ reg = <35>;
+ atmel,clk-output-range = <0 83000000>;
+ };
+
+ pwm_gclk: pwm_gclk {
+ #clock-cells = <0>;
+ reg = <38>;
+ atmel,clk-output-range = <0 83000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt
new file mode 100644
index 000000000000..e56a1df3a9d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt
@@ -0,0 +1,45 @@
+Broadcom BCM2835 CPRMAN clocks
+
+This binding uses the common clock binding:
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+The CPRMAN clock controller generates clocks in the audio power domain
+of the BCM2835. There is a level of PLLs deriving from an external
+oscillator, a level of PLL dividers that produce channels off of the
+few PLLs, and a level of mostly-generic clock generators sourcing from
+the PLL channels. Most other hardware components source from the
+clock generators, but a few (like the ARM or HDMI) will source from
+the PLL dividers directly.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-cprman"
+- #clock-cells: Should be <1>. The permitted clock-specifier values can be
+ found in include/dt-bindings/clock/bcm2835.h
+- reg: Specifies base physical address and size of the registers
+- clocks: The external oscillator clock phandle
+
+Example:
+
+ clk_osc: clock@3 {
+ compatible = "fixed-clock";
+ reg = <3>;
+ #clock-cells = <0>;
+ clock-output-names = "osc";
+ clock-frequency = <19200000>;
+ };
+
+ clocks: cprman@7e101000 {
+ compatible = "brcm,bcm2835-cprman";
+ #clock-cells = <1>;
+ reg = <0x7e101000 0x2000>;
+ clocks = <&clk_osc>;
+ };
+
+ i2c0: i2c@7e205000 {
+ compatible = "brcm,bcm2835-i2c";
+ reg = <0x7e205000 0x1000>;
+ interrupts = <2 21>;
+ clocks = <&clocks BCM2835_CLOCK_VPU>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
index da8d9bb5751c..ede65a55e21b 100644
--- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
@@ -130,3 +130,81 @@ These clock IDs are defined in:
ch3_unused mipipll 4 BCM_CYGNUS_MIPIPLL_CH3_UNUSED
ch4_unused mipipll 5 BCM_CYGNUS_MIPIPLL_CH4_UNUSED
ch5_unused mipipll 6 BCM_CYGNUS_MIPIPLL_CH5_UNUSED
+
+Northstar and Northstar Plus
+------
+PLL and leaf clock compatible strings for Northstar and Northstar Plus are:
+ "brcm,nsp-armpll"
+ "brcm,nsp-genpll"
+ "brcm,nsp-lcpll0"
+
+The following table defines the set of PLL/clock index and ID for Northstar and
+Northstar Plus. These clock IDs are defined in:
+ "include/dt-bindings/clock/bcm-nsp.h"
+
+ Clock Source Index ID
+ --- ----- ----- ---------
+ crystal N/A N/A N/A
+
+ armpll crystal N/A N/A
+
+ genpll crystal 0 BCM_NSP_GENPLL
+ phy genpll 1 BCM_NSP_GENPLL_PHY_CLK
+ ethernetclk genpll 2 BCM_NSP_GENPLL_ENET_SW_CLK
+ usbclk genpll 3 BCM_NSP_GENPLL_USB_PHY_REF_CLK
+ iprocfast genpll 4 BCM_NSP_GENPLL_IPROCFAST_CLK
+ sata1 genpll 5 BCM_NSP_GENPLL_SATA1_CLK
+ sata2 genpll 6 BCM_NSP_GENPLL_SATA2_CLK
+
+ lcpll0 crystal 0 BCM_NSP_LCPLL0
+ pcie_phy lcpll0 1 BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK
+ sdio lcpll0 2 BCM_NSP_LCPLL0_SDIO_CLK
+ ddr_phy lcpll0 3 BCM_NSP_LCPLL0_DDR_PHY_CLK
+
+Northstar 2
+-----------
+PLL and leaf clock compatible strings for Northstar 2 are:
+ "brcm,ns2-genpll-scr"
+ "brcm,ns2-genpll-sw"
+ "brcm,ns2-lcpll-ddr"
+ "brcm,ns2-lcpll-ports"
+
+The following table defines the set of PLL/clock index and ID for Northstar 2.
+These clock IDs are defined in:
+ "include/dt-bindings/clock/bcm-ns2.h"
+
+ Clock Source Index ID
+ --- ----- ----- ---------
+ crystal N/A N/A N/A
+
+ genpll_scr crystal 0 BCM_NS2_GENPLL_SCR
+ scr genpll_scr 1 BCM_NS2_GENPLL_SCR_SCR_CLK
+ fs genpll_scr 2 BCM_NS2_GENPLL_SCR_FS_CLK
+ audio_ref genpll_scr 3 BCM_NS2_GENPLL_SCR_AUDIO_CLK
+ ch3_unused genpll_scr 4 BCM_NS2_GENPLL_SCR_CH3_UNUSED
+ ch4_unused genpll_scr 5 BCM_NS2_GENPLL_SCR_CH4_UNUSED
+ ch5_unused genpll_scr 6 BCM_NS2_GENPLL_SCR_CH5_UNUSED
+
+ genpll_sw crystal 0 BCM_NS2_GENPLL_SW
+ rpe genpll_sw 1 BCM_NS2_GENPLL_SW_RPE_CLK
+ 250 genpll_sw 2 BCM_NS2_GENPLL_SW_250_CLK
+ nic genpll_sw 3 BCM_NS2_GENPLL_SW_NIC_CLK
+ chimp genpll_sw 4 BCM_NS2_GENPLL_SW_CHIMP_CLK
+ port genpll_sw 5 BCM_NS2_GENPLL_SW_PORT_CLK
+ sdio genpll_sw 6 BCM_NS2_GENPLL_SW_SDIO_CLK
+
+ lcpll_ddr crystal 0 BCM_NS2_LCPLL_DDR
+ pcie_sata_usb lcpll_ddr 1 BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK
+ ddr lcpll_ddr 2 BCM_NS2_LCPLL_DDR_DDR_CLK
+ ch2_unused lcpll_ddr 3 BCM_NS2_LCPLL_DDR_CH2_UNUSED
+ ch3_unused lcpll_ddr 4 BCM_NS2_LCPLL_DDR_CH3_UNUSED
+ ch4_unused lcpll_ddr 5 BCM_NS2_LCPLL_DDR_CH4_UNUSED
+ ch5_unused lcpll_ddr 6 BCM_NS2_LCPLL_DDR_CH5_UNUSED
+
+ lcpll_ports crystal 0 BCM_NS2_LCPLL_PORTS
+ wan lcpll_ports 1 BCM_NS2_LCPLL_PORTS_WAN_CLK
+ rgmii lcpll_ports 2 BCM_NS2_LCPLL_PORTS_RGMII_CLK
+ ch2_unused lcpll_ports 3 BCM_NS2_LCPLL_PORTS_CH2_UNUSED
+ ch3_unused lcpll_ports 4 BCM_NS2_LCPLL_PORTS_CH3_UNUSED
+ ch4_unused lcpll_ports 5 BCM_NS2_LCPLL_PORTS_CH4_UNUSED
+ ch5_unused lcpll_ports 6 BCM_NS2_LCPLL_PORTS_CH5_UNUSED
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
index 5ddb68418655..38dcf0370143 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
@@ -1,7 +1,7 @@
* Renesas CPG DIV6 Clock
The CPG DIV6 clocks are variable factor clocks provided by the Clock Pulse
-Generator (CPG). They clock input is divided by a configurable factor from 1
+Generator (CPG). Their clock input is divided by a configurable factor from 1
to 64.
Required Properties:
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
new file mode 100644
index 000000000000..59297d34b208
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -0,0 +1,69 @@
+* Renesas Clock Pulse Generator / Module Standby and Software Reset
+
+On Renesas ARM SoCs (SH/R-Mobile, R-Car, RZ), the CPG (Clock Pulse Generator)
+and MSSR (Module Standby and Software Reset) blocks are intimately connected,
+and share the same register block.
+
+They provide the following functionalities:
+ - The CPG block generates various core clocks,
+ - The MSSR block provides two functions:
+ 1. Module Standby, providing a Clock Domain to control the clock supply
+ to individual SoC devices,
+ 2. Reset Control, to perform a software reset of individual SoC devices.
+
+Required Properties:
+ - compatible: Must be one of:
+ - "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC
+
+ - reg: Base address and length of the memory resource used by the CPG/MSSR
+ block
+
+ - clocks: References to external parent clocks, one entry for each entry in
+ clock-names
+ - clock-names: List of external parent clock names. Valid names are:
+ - "extal" (r8a7795)
+ - "extalr" (r8a7795)
+
+ - #clock-cells: Must be 2
+ - For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
+ and a core clock reference, as defined in
+ <dt-bindings/clock/*-cpg-mssr.h>.
+ - For module clocks, the two clock specifier cells must be "CPG_MOD" and
+ a module number, as defined in the datasheet.
+
+ - #power-domain-cells: Must be 0
+ - SoC devices that are part of the CPG/MSSR Clock Domain and can be
+ power-managed through Module Standby should refer to the CPG device
+ node in their "power-domains" property, as documented by the generic PM
+ Domain bindings in
+ Documentation/devicetree/bindings/power/power_domain.txt.
+
+
+Examples
+--------
+
+ - CPG device node:
+
+ cpg: clock-controller@e6150000 {
+ compatible = "renesas,r8a7795-cpg-mssr";
+ reg = <0 0xe6150000 0 0x1000>;
+ clocks = <&extal_clk>, <&extalr_clk>;
+ clock-names = "extal", "extalr";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ };
+
+
+ - CPG/MSSR Clock Domain member device node:
+
+ scif2: serial@e6e88000 {
+ compatible = "renesas,scif-r8a7795", "renesas,scif";
+ reg = <0 0xe6e88000 0 64>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 310>;
+ clock-names = "sci_ick";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>;
+ dma-names = "tx", "rx";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/clock/silabs,si514.txt b/Documentation/devicetree/bindings/clock/silabs,si514.txt
new file mode 100644
index 000000000000..ea1a9dbc63b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/silabs,si514.txt
@@ -0,0 +1,24 @@
+Binding for Silicon Labs 514 programmable I2C clock generator.
+
+Reference
+This binding uses the common clock binding[1]. Details about the device can be
+found in the datasheet[2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Si514 datasheet
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/si514.pdf
+
+Required properties:
+ - compatible: Shall be "silabs,si514"
+ - reg: I2C device address.
+ - #clock-cells: From common clock bindings: Shall be 0.
+
+Optional properties:
+ - clock-output-names: From common clock bindings. Recommended to be "si514".
+
+Example:
+ si514: clock-generator@55 {
+ reg = <0x55>;
+ #clock-cells = <0>;
+ compatible = "silabs,si514";
+ };
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
index d8b168ebd5f1..844b3a0976bf 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
@@ -23,6 +23,7 @@ Required properties:
"st,stih407-plls-c32-a9", "st,clkgen-plls-c32"
"sst,plls-c32-cx_0", "st,clkgen-plls-c32"
"sst,plls-c32-cx_1", "st,clkgen-plls-c32"
+ "st,stih418-plls-c28-a9", "st,clkgen-plls-c32"
"st,stih415-gpu-pll-c32", "st,clkgengpu-pll-c32"
"st,stih416-gpu-pll-c32", "st,clkgengpu-pll-c32"
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index 3443e0f838df..947863acc2d4 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -36,5 +36,24 @@ the PCIe specification.
NOTE: this only applies to the SMMU itself, not
masters connected upstream of the SMMU.
+- msi-parent : See the generic MSI binding described in
+ devicetree/bindings/interrupt-controller/msi.txt
+ for a description of the msi-parent property.
+
- hisilicon,broken-prefetch-cmd
: Avoid sending CMD_PREFETCH_* commands to the SMMU.
+
+** Example
+
+ smmu@2b400000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0x2b400000 0x0 0x20000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
+ dma-coherent;
+ #iommu-cells = <0>;
+ msi-parent = <&its 0xff0000>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
index 869699925fd5..4bd10dd881b8 100644
--- a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : Should be one of,
"ti,omap2-iommu" for OMAP2/OMAP3 IOMMU instances
"ti,omap4-iommu" for OMAP4/OMAP5 IOMMU instances
+ "ti,dra7-dsp-iommu" for DRA7xx DSP IOMMU instances
"ti,dra7-iommu" for DRA7xx IOMMU instances
- ti,hwmods : Name of the hwmod associated with the IOMMU instance
- reg : Address space for the configuration registers
@@ -19,6 +20,13 @@ Optional properties:
Should be either 8 or 32 (default: 32)
- ti,iommu-bus-err-back : Indicates the IOMMU instance supports throwing
back a bus error response on MMU faults.
+- ti,syscon-mmuconfig : Should be a pair of the phandle to the DSP_SYSTEM
+ syscon node that contains the additional control
+ register for enabling the MMU, and the MMU instance
+ number (0-indexed) within the sub-system. This property
+ is required for DSP IOMMU instances on DRA7xx SoCs. The
+ instance number should be 0 for DSP MDMA MMUs and 1 for
+ DSP EDMA MMUs.
Example:
/* OMAP3 ISP MMU */
@@ -30,3 +38,22 @@ Example:
ti,hwmods = "mmu_isp";
ti,#tlb-entries = <8>;
};
+
+ /* DRA74x DSP2 MMUs */
+ mmu0_dsp2: mmu@41501000 {
+ compatible = "ti,dra7-dsp-iommu";
+ reg = <0x41501000 0x100>;
+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mmu0_dsp2";
+ #iommu-cells = <0>;
+ ti,syscon-mmuconfig = <&dsp2_system 0x0>;
+ };
+
+ mmu1_dsp2: mmu@41502000 {
+ compatible = "ti,dra7-dsp-iommu";
+ reg = <0x41502000 0x100>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mmu1_dsp2";
+ #iommu-cells = <0>;
+ ti,syscon-mmuconfig = <&dsp2_system 0x1>;
+ };
diff --git a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
index 4ef45636ebde..38941db23dd2 100644
--- a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
+++ b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
@@ -4,7 +4,8 @@ Required properties:
- compatible : should be one of:
"samsung,s5pv210-jpeg", "samsung,exynos4210-jpeg",
- "samsung,exynos3250-jpeg", "samsung,exynos5420-jpeg";
+ "samsung,exynos3250-jpeg", "samsung,exynos5420-jpeg",
+ "samsung,exynos5433-jpeg";
- reg : address and length of the JPEG codec IP register set;
- interrupts : specifies the JPEG codec IP interrupt;
- clock-names : should contain:
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 41811223e5be..a474359dd206 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -60,8 +60,8 @@ DCDC2 : DC-DC buck : vin2-supply
DCDC3 : DC-DC buck : vin3-supply
DCDC4 : DC-DC buck : vin4-supply
DCDC5 : DC-DC buck : vin5-supply
-DC1SW : On/Off Switch : dcdc1-supply : DCDC1 secondary output
-DC5LDO : LDO : dcdc5-supply : input from DCDC5
+DC1SW : On/Off Switch : : DCDC1 secondary output
+DC5LDO : LDO : : input from DCDC5
ALDO1 : LDO : aldoin-supply : shared supply
ALDO2 : LDO : aldoin-supply : shared supply
ALDO3 : LDO : aldoin-supply : shared supply
diff --git a/Documentation/devicetree/bindings/power/bq24257.txt b/Documentation/devicetree/bindings/power/bq24257.txt
index 5c9d3940d07c..d693702c9c1e 100644
--- a/Documentation/devicetree/bindings/power/bq24257.txt
+++ b/Documentation/devicetree/bindings/power/bq24257.txt
@@ -1,21 +1,64 @@
-Binding for TI bq24257 Li-Ion Charger
+Binding for TI bq24250/bq24251/bq24257 Li-Ion Charger
Required properties:
- compatible: Should contain one of the following:
+ * "ti,bq24250"
+ * "ti,bq24251"
* "ti,bq24257"
-- reg: integer, i2c address of the device.
+- reg: integer, i2c address of the device.
+- interrupt-parent: Should be the phandle for the interrupt controller. Use in
+ conjunction with "interrupts".
+- interrupts: Interrupt mapping for GPIO IRQ (configure for both edges). Use in
+ conjunction with "interrupt-parent".
- ti,battery-regulation-voltage: integer, maximum charging voltage in uV.
-- ti,charge-current: integer, maximum charging current in uA.
-- ti,termination-current: integer, charge will be terminated when current in
- constant-voltage phase drops below this value (in uA).
+- ti,charge-current: integer, maximum charging current in uA.
+- ti,termination-current: integer, charge will be terminated when current in
+ constant-voltage phase drops below this value (in uA).
+
+Optional properties:
+- pg-gpios: GPIO used for connecting the bq2425x device PG (Power Good) pin.
+ This pin is not available on all devices however it should be used if
+ possible as this is the recommended way to obtain the charger's input PG
+ state. If this pin is not specified a software-based approach for PG
+ detection is used.
+- ti,current-limit: The maximum current to be drawn from the charger's input
+ (in uA). If this property is not specified, the input limit current is
+ set automatically using USB D+/D- signal based charger type detection.
+ If the hardware does not support the D+/D- based detection, a default
+ of 500,000 is used (=500mA) instead.
+- ti,ovp-voltage: Configures the over voltage protection voltage (in uV). If
+ not specified a default of 6,500,000 (=6.5V) is used.
+- ti,in-dpm-voltage: Configures the threshold input voltage for the dynamic
+ power path management (in uV). If not specified a default of 4,360,000
+ (=4.36V) is used.
Example:
bq24257 {
compatible = "ti,bq24257";
reg = <0x6a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <16 IRQ_TYPE_EDGE_BOTH>;
+
+ pg-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
ti,battery-regulation-voltage = <4200000>;
ti,charge-current = <1000000>;
ti,termination-current = <50000>;
};
+
+Example:
+
+bq24250 {
+ compatible = "ti,bq24250";
+ reg = <0x6a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <16 IRQ_TYPE_EDGE_BOTH>;
+
+ ti,battery-regulation-voltage = <4200000>;
+ ti,charge-current = <500000>;
+ ti,termination-current = <50000>;
+ ti,current-limit = <900000>;
+ ti,ovp-voltage = <9500000>;
+ ti,in-dpm-voltage = <4440000>;
+};
diff --git a/Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt b/Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt
new file mode 100644
index 000000000000..862f4a49dc49
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt
@@ -0,0 +1,34 @@
+AXP20x USB power supply
+
+Required Properties:
+-compatible: "x-powers,axp202-usb-power-supply"
+
+This node is a subnode of the axp20x PMIC.
+
+Example:
+
+axp209: pmic@34 {
+ compatible = "x-powers,axp209";
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ regulators {
+ x-powers,dcdc-freq = <1500>;
+
+ vdd_cpu: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ ...
+ };
+
+ usb-power-supply: usb-power-supply {
+ compatible = "x-powers,axp202-usb-power-supply";
+ };
+};
diff --git a/Documentation/devicetree/bindings/power_supply/qcom_smbb.txt b/Documentation/devicetree/bindings/power_supply/qcom_smbb.txt
new file mode 100644
index 000000000000..65b88fac854b
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/qcom_smbb.txt
@@ -0,0 +1,131 @@
+Qualcomm Switch-Mode Battery Charger and Boost
+
+PROPERTIES
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Description: Must be one of:
+ - "qcom,pm8941-charger"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Description: Base address of registers for SMBB block
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Description: The format of the specifier is defined by the binding document
+ describing the node's interrupt parent. Must contain one
+ specifier for each of the following interrupts, in order:
+ - charge done
+ - charge fast mode
+ - charge trickle mode
+ - battery temperature ok
+ - battery present
+ - charger disconnected
+ - USB-in valid
+ - DC-in valid
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Description: Must contain the following list, strictly ordered:
+ "chg-done",
+ "chg-fast",
+ "chg-trkl",
+ "bat-temp-ok",
+ "bat-present",
+ "chg-gone",
+ "usb-valid",
+ "dc-valid"
+
+- qcom,fast-charge-current-limit:
+ Usage: optional (default: 1A, or pre-configured value)
+ Value type: <u32>; uA; range [100mA : 3A]
+ Description: Maximum charge current; May be clamped to safety limits.
+
+- qcom,fast-charge-low-threshold-voltage:
+ Usage: optional (default: 3.2V, or pre-configured value)
+ Value type: <u32>; uV; range [2.1V : 3.6V]
+ Description: Battery voltage limit above which fast charging may operate;
+ Below this value linear or switch-mode auto-trickle-charging
+ will operate.
+
+- qcom,fast-charge-high-threshold-voltage:
+ Usage: optional (default: 4.2V, or pre-configured value)
+ Value type: <u32>; uV; range [3.24V : 5V]
+ Description: Battery voltage limit below which fast charging may operate;
+ The fast charger will attempt to charge the battery to this
+ voltage. May be clamped to safety limits.
+
+- qcom,fast-charge-safe-voltage:
+ Usage: optional (default: 4.2V, or pre-configured value)
+ Value type: <u32>; uV; range [3.24V : 5V]
+ Description: Maximum safe battery voltage; May be pre-set by bootloader, in
+ which case, setting this will harmlessly fail. The property
+ 'qcom,fast-charge-high-threshold-voltage' will be clamped by this value.
+
+- qcom,fast-charge-safe-current:
+ Usage: optional (default: 1A, or pre-configured value)
+ Value type: <u32>; uA; range [100mA : 3A]
+ Description: Maximum safe battery charge current; May be pre-set by bootloader,
+ in which case, setting this will harmlessly fail. The property
+ 'qcom,fast-charge-current-limit' will be clamped by this value.
+
+- qcom,auto-recharge-threshold-voltage:
+ Usage: optional (default: 4.1V, or pre-configured value)
+ Value type: <u32>; uV; range [3.24V : 5V]
+ Description: Battery voltage limit below which auto-recharge functionality
+ will restart charging after end-of-charge; The high cutoff
+ limit for auto-recharge is 5% above this value.
+
+- qcom,minimum-input-voltage:
+ Usage: optional (default: 4.3V, or pre-configured value)
+ Value type: <u32>; uV; range [4.2V : 9.6V]
+ Description: Input voltage level above which charging may operate
+
+- qcom,dc-current-limit:
+ Usage: optional (default: 100mA, or pre-configured value)
+ Value type: <u32>; uA; range [100mA : 2.5A]
+ Description: Default DC charge current limit
+
+- qcom,disable-dc:
+ Usage: optional (default: false)
+ Value type: boolean: <u32> or <empty>
+ Description: Disable DC charger
+
+- qcom,jeita-extended-temp-range:
+ Usage: optional (default: false)
+ Value type: boolean: <u32> or <empty>
+ Description: Enable JEITA extended temperature range; This does *not*
+ adjust the maximum charge voltage or current in the extended
+ temperature range. It only allows charging when the battery
+ is in the extended temperature range. Voltage/current
+ regulation must be done externally to fully comply with
+ the JEITA safety guidelines if this flag is set.
+
+EXAMPLE
+charger@1000 {
+ compatible = "qcom,pm8941-charger";
+ reg = <0x1000 0x700>;
+ interrupts = <0x0 0x10 7 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x10 5 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x10 4 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 0 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 1 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "chg-done",
+ "chg-fast",
+ "chg-trkl",
+ "bat-temp-ok",
+ "bat-present",
+ "chg-gone",
+ "usb-valid",
+ "dc-valid";
+
+ qcom,fast-charge-current-limit = <1000000>;
+ qcom,dc-current-limit = <1000000>;
+};
diff --git a/Documentation/devicetree/bindings/power_supply/tps65217_charger.txt b/Documentation/devicetree/bindings/power_supply/tps65217_charger.txt
new file mode 100644
index 000000000000..98d131acee95
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/tps65217_charger.txt
@@ -0,0 +1,12 @@
+TPS65217 Charger
+
+Required Properties:
+-compatible: "ti,tps65217-charger"
+
+This node is a subnode of the tps65217 PMIC.
+
+Example:
+
+ tps65217-charger {
+ compatible = "ti,tps65217-charger";
+ };
diff --git a/Documentation/devicetree/bindings/regulator/act8865-regulator.txt b/Documentation/devicetree/bindings/regulator/act8865-regulator.txt
index e91485d11241..6067d9830d07 100644
--- a/Documentation/devicetree/bindings/regulator/act8865-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/act8865-regulator.txt
@@ -8,6 +8,8 @@ Required properties:
Optional properties:
- system-power-controller: Telling whether or not this pmic is controlling
the system power. See Documentation/devicetree/bindings/power/power-controller.txt .
+- active-semi,vsel-high: Indicates the VSEL pin is high.
+ If this property is missing, assume the VSEL pin is low (0).
Optional input supply properties:
- for act8600:
@@ -49,6 +51,7 @@ Example:
pmic: act8865@5b {
compatible = "active-semi,act8865";
reg = <0x5b>;
+ active-semi,vsel-high;
status = "disabled";
regulators {
diff --git a/Documentation/devicetree/bindings/regulator/anatop-regulator.txt b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
index 758eae24082a..37c4ea076f88 100644
--- a/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
@@ -13,6 +13,7 @@ Optional properties:
- anatop-delay-reg-offset: Anatop MFD step time register offset
- anatop-delay-bit-shift: Bit shift for the step time register
- anatop-delay-bit-width: Number of bits used in the step time register
+- vin-supply: The supply for this regulator
Any property defined as part of the core regulator
binding, defined in regulator.txt, can also be used.
diff --git a/Documentation/devicetree/bindings/regulator/arizona-regulator.txt b/Documentation/devicetree/bindings/regulator/arizona-regulator.txt
new file mode 100644
index 000000000000..443564d7784f
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/arizona-regulator.txt
@@ -0,0 +1,17 @@
+Cirrus Logic Arizona class audio SoCs
+
+These devices are audio SoCs with extensive digital capabilities and a range
+of analogue I/O.
+
+This document lists regulator specific bindings, see the primary binding
+document:
+ ../mfd/arizona.txt
+
+Optional properties:
+ - wlf,ldoena : GPIO specifier for the GPIO controlling LDOENA
+
+Optional subnodes:
+ - ldo1 : Initial data for the LDO1 regulator, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt
+ - micvdd : Initial data for the MICVDD regulator, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/regulator/max77802.txt b/Documentation/devicetree/bindings/regulator/max77802.txt
index 79e5476444f7..09d796ed48be 100644
--- a/Documentation/devicetree/bindings/regulator/max77802.txt
+++ b/Documentation/devicetree/bindings/regulator/max77802.txt
@@ -8,7 +8,28 @@ regulators that can be controlled over I2C.
Following properties should be present in main device node of the MFD chip.
-Optional node:
+Optional properties:
+- inb1-supply: The input supply for BUCK1
+- inb2-supply: The input supply for BUCK2
+- inb3-supply: The input supply for BUCK3
+- inb4-supply: The input supply for BUCK4
+- inb5-supply: The input supply for BUCK5
+- inb6-supply: The input supply for BUCK6
+- inb7-supply: The input supply for BUCK7
+- inb8-supply: The input supply for BUCK8
+- inb9-supply: The input supply for BUCK9
+- inb10-supply: The input supply for BUCK10
+- inl1-supply: The input supply for LDO8 and LDO15
+- inl2-supply: The input supply for LDO17, LDO27, LDO30 and LDO35
+- inl3-supply: The input supply for LDO3, LDO5, LDO6 and LDO7
+- inl4-supply: The input supply for LDO10, LDO11, LDO13 and LDO14
+- inl5-supply: The input supply for LDO9 and LDO19
+- inl6-supply: The input supply for LDO4, LDO21, LDO24 and LDO33
+- inl7-supply: The input supply for LDO18, LDO20, LDO28 and LDO29
+- inl9-supply: The input supply for LDO12, LDO23, LDO25, LDO26, LDO32 and LDO34
+- inl10-supply: The input supply for LDO1 and LDO2
+
+Optional nodes:
- regulators : The regulators of max77802 have to be instantiated
under subnode named "regulators" using the following format.
@@ -58,6 +79,8 @@ Example:
#address-cells = <1>;
#size-cells = <0>;
+ inb1-supply = <&parent_reg>;
+
regulators {
ldo1_reg: LDO1 {
regulator-name = "vdd_1v0";
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 24bd422cecd5..1d112fc456aa 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -11,6 +11,7 @@ Optional properties:
- regulator-always-on: boolean, regulator should never be disabled
- regulator-boot-on: bootloader/firmware enabled regulator
- regulator-allow-bypass: allow the regulator to go into bypass mode
+- regulator-allow-set-load: allow the regulator performance level to be configured
- <name>-supply: phandle to the parent supply/regulator node
- regulator-ramp-delay: ramp delay for regulator(in uV/uS)
For hardware which supports disabling ramp rate, it should be explicitly
diff --git a/Documentation/devicetree/bindings/regulator/tps65023.txt b/Documentation/devicetree/bindings/regulator/tps65023.txt
new file mode 100644
index 000000000000..a4714e4da370
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/tps65023.txt
@@ -0,0 +1,60 @@
+TPS65023 family of regulators
+
+Required properties:
+- compatible: Must be one of the following.
+ "ti,tps65020",
+ "ti,tps65021",
+ "ti,tps65023",
+- reg: I2C slave address
+- regulators: list of regulators provided by this controller, must be named
+ after their hardware counterparts: VDCDC[1-3] and LDO[1-2]
+- regulators: This is the list of child nodes that specify the regulator
+ initialization data for defined regulators. The definition for each of
+ these nodes is defined using the standard binding for regulators found at
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+ tps65023@48 {
+ compatible = "ti,tps65023";
+ reg = <0x48>;
+
+ regulators {
+ VDCDC1 {
+ regulator-name = "vdd_mpu";
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ VDCDC2 {
+ regulator-name = "vdd_core";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ VDCDC3 {
+ regulator-name = "vdd_io";
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ LDO1 {
+ regulator-name = "vdd_usb18";
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ LDO2 {
+ regulator-name = "vdd_usb33";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/brcm,bcm2835-aux-spi.txt b/Documentation/devicetree/bindings/spi/brcm,bcm2835-aux-spi.txt
new file mode 100644
index 000000000000..9887b0724759
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/brcm,bcm2835-aux-spi.txt
@@ -0,0 +1,38 @@
+Broadcom BCM2835 auxiliary SPI1/2 controller
+
+The BCM2835 contains two forms of SPI master controller, one known simply as
+SPI0, and the other known as the "Universal SPI Master"; part of the
+auxiliary block. This binding applies to the SPI1/2 controller.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-aux-spi".
+- reg: Should contain register location and length for the spi block
+- interrupts: Should contain shared interrupt of the aux block
+- clocks: The clock feeding the SPI controller - needs to
+ point to the auxiliary clock driver of the bcm2835,
+ as this clock will enable the output gate for the specific
+ clock.
+- cs-gpios: the cs-gpios (native cs is NOT supported)
+ see also spi-bus.txt
+
+Example:
+
+spi1@7e215080 {
+ compatible = "brcm,bcm2835-aux-spi";
+ reg = <0x7e215080 0x40>;
+ interrupts = <1 29>;
+ clocks = <&aux_clocks BCM2835_AUX_CLOCK_SPI1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&gpio 18>, <&gpio 17>, <&gpio 16>;
+};
+
+spi2@7e2150c0 {
+ compatible = "brcm,bcm2835-aux-spi";
+ reg = <0x7e2150c0 0x40>;
+ interrupts = <1 29>;
+ clocks = <&aux_clocks BCM2835_AUX_CLOCK_SPI2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&gpio 43>, <&gpio 44>, <&gpio 45>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index 6160ffbcb3d3..ce363c923f44 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -29,8 +29,11 @@ Required properties:
muxes clock, and "spi-clk" for the clock gate.
Optional properties:
+-cs-gpios: see spi-bus.txt, only required for MT8173.
+
- mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi
- controller used, this value should be 0~3, only required for MT8173.
+ controller used. This is an array; each element value should be 0~3,
+ only required for MT8173.
0: specify GPIO69,70,71,72 for spi pins.
1: specify GPIO102,103,104,105 for spi pins.
2: specify GPIO128,129,130,131 for spi pins.
@@ -49,7 +52,7 @@ spi: spi@1100a000 {
<&topckgen CLK_TOP_SPI_SEL>,
<&pericfg CLK_PERI_SPI0>;
clock-names = "parent-clk", "sel-clk", "spi-clk";
-
- mediatek,pad-select = <0>;
+ cs-gpios = <&pio 105 GPIO_ACTIVE_LOW>, <&pio 72 GPIO_ACTIVE_LOW>;
+ mediatek,pad-select = <1>, <0>;
status = "disabled";
};
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 3fa450881ecb..aba85b39a400 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -220,7 +220,7 @@ to coerce it into behaving.
Compose dialog.
Please note that "external editor" requires that your editor must not
- fork, or in other words, the editor must not return before closing.
+ fork, or in other words, the editor must not return before closing.
You may have to pass additional flags or change the settings of your
editor. Most notably if you are using gvim then you must pass the -f
option to gvim by putting "/usr/bin/gvim -f" (if the binary is in
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e2d5105b7214..b102b436563e 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -102,7 +102,8 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
collection, triggered in background when I/O subsystem is
idle. If background_gc=on, it will turn on the garbage
collection and if background_gc=off, garbage collection
- will be truned off.
+ will be turned off. If background_gc=sync, it will turn
+ on synchronous garbage collection running in background.
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
diff --git a/Documentation/filesystems/path-lookup.md b/Documentation/filesystems/path-lookup.md
new file mode 100644
index 000000000000..1b39e084a2b2
--- /dev/null
+++ b/Documentation/filesystems/path-lookup.md
@@ -0,0 +1,1297 @@
+<head>
+<style> p { max-width:50em} ol, ul {max-width: 40em}</style>
+</head>
+
+Pathname lookup in Linux.
+=========================
+
+This write-up is based on three articles published at lwn.net:
+
+- <https://lwn.net/Articles/649115/> Pathname lookup in Linux
+- <https://lwn.net/Articles/649729/> RCU-walk: faster pathname lookup in Linux
+- <https://lwn.net/Articles/650786/> A walk among the symlinks
+
+Written by Neil Brown with help from Al Viro and Jon Corbet.
+
+Introduction
+------------
+
+The most obvious aspect of pathname lookup, which very little
+exploration is needed to discover, is that it is complex. There are
+many rules, special cases, and implementation alternatives that all
+combine to confuse the unwary reader. Computer science has long been
+acquainted with such complexity and has tools to help manage it. One
+tool that we will make extensive use of is "divide and conquer". For
+the early parts of the analysis we will divide off symlinks - leaving
+them until the final part. Well before we get to symlinks we have
+another major division based on the VFS's approach to locking which
+will allow us to review "REF-walk" and "RCU-walk" separately. But we
+are getting ahead of ourselves. There are some important low level
+distinctions we need to clarify first.
+
+There are two sorts of ...
+--------------------------
+
+[`openat()`]: http://man7.org/linux/man-pages/man2/openat.2.html
+
+Pathnames (sometimes "file names"), used to identify objects in the
+filesystem, will be familiar to most readers. They contain two sorts
+of elements: "slashes" that are sequences of one or more "`/`"
+characters, and "components" that are sequences of one or more
+non-"`/`" characters. These form two kinds of paths. Those that
+start with slashes are "absolute" and start from the filesystem root.
+The others are "relative" and start from the current directory, or
+from some other location specified by a file descriptor given to a
+"xxx`at`" system call such as "[`openat()`]".
+
+[`execveat()`]: http://man7.org/linux/man-pages/man2/execveat.2.html
+
+It is tempting to describe the second kind as starting with a
+component, but that isn't always accurate: a pathname can lack both
+slashes and components, it can be empty, in other words. This is
+generally forbidden in POSIX, but some of those "xxx`at`" system calls
+in Linux permit it when the `AT_EMPTY_PATH` flag is given. For
+example, if you have an open file descriptor on an executable file you
+can execute it by calling [`execveat()`] passing the file descriptor,
+an empty path, and the `AT_EMPTY_PATH` flag.
+
+These paths can be divided into two sections: the final component and
+everything else. The "everything else" is the easy bit. In all cases
+it must identify a directory that already exists, otherwise an error
+such as `ENOENT` or `ENOTDIR` will be reported.
+
+The final component is not so simple. Not only do different system
+calls interpret it quite differently (e.g. some create it, some do
+not), but it might not even exist: neither the empty pathname nor the
+pathname that is just slashes have a final component. If it does
+exist, it could be "`.`" or "`..`" which are handled quite differently
+from other components.
+
+[POSIX]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
+
+If a pathname ends with a slash, such as "`/tmp/foo/`" it might be
+tempting to consider that to have an empty final component. In many
+ways that would lead to correct results, but not always. In
+particular, `mkdir()` and `rmdir()` each create or remove a directory named
+by the final component, and they are required to work with pathnames
+ending in "`/`". According to [POSIX]
+
+> A pathname that contains at least one non- &lt;slash> character and
+> that ends with one or more trailing &lt;slash> characters shall not
+> be resolved successfully unless the last pathname component before
+> the trailing <slash> characters names an existing directory or a
+> directory entry that is to be created for a directory immediately
+> after the pathname is resolved.
+
+The Linux pathname walking code (mostly in `fs/namei.c`) deals with
+all of these issues: breaking the path into components, handling the
+"everything else" quite separately from the final component, and
+checking that the trailing slash is not used where it isn't
+permitted. It also addresses the important issue of concurrent
+access.
+
+While one process is looking up a pathname, another might be making
+changes that affect that lookup. One fairly extreme case is that if
+"a/b" were renamed to "a/c/b" while another process were looking up
+"a/b/..", that process might successfully resolve on "a/c".
+Most races are much more subtle, and a big part of the task of
+pathname lookup is to prevent them from having damaging effects. Many
+of the possible races are seen most clearly in the context of the
+"dcache" and an understanding of that is central to understanding
+pathname lookup.
+
+More than just a cache.
+-----------------------
+
+The "dcache" caches information about names in each filesystem to
+make them quickly available for lookup. Each entry (known as a
+"dentry") contains three significant fields: a component name, a
+pointer to a parent dentry, and a pointer to the "inode" which
+contains further information about the object in that parent with
+the given name. The inode pointer can be `NULL` indicating that the
+name doesn't exist in the parent. While there can be linkage in the
+dentry of a directory to the dentries of the children, that linkage is
+not used for pathname lookup, and so will not be considered here.
+
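+To make that concrete, a dentry can be pictured roughly as below.
+This is only an illustrative sketch, not the layout of the real
+`struct dentry`:
+
+```c
+struct inode;                           /* defined elsewhere */
+
+struct dentry_sketch {
+        const char           *d_name;   /* one component: no '/' characters */
+        struct dentry_sketch *d_parent; /* dentry of the containing directory */
+        struct inode         *d_inode;  /* NULL: name doesn't exist in parent */
+};
+```
+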
+The dcache has a number of uses apart from accelerating lookup. One
+that will be particularly relevant is that it is closely integrated
+with the mount table that records which filesystem is mounted where.
+What the mount table actually stores is which dentry is mounted on top
+of which other dentry.
+
+When considering the dcache, we have another of our "two types"
+distinctions: there are two types of filesystems.
+
+Some filesystems ensure that the information in the dcache is always
+completely accurate (though not necessarily complete). This can allow
+the VFS to determine if a particular file does or doesn't exist
+without checking with the filesystem, and means that the VFS can
+protect the filesystem against certain races and other problems.
+These are typically "local" filesystems such as ext3, XFS, and Btrfs.
+
+Other filesystems don't provide that guarantee because they cannot.
+These are typically filesystems that are shared across a network,
+whether remote filesystems like NFS and 9P, or cluster filesystems
+like ocfs2 or cephfs. These filesystems allow the VFS to revalidate
+cached information, and must provide their own protection against
+awkward races. The VFS can detect these filesystems by the
+`DCACHE_OP_REVALIDATE` flag being set in the dentry.
+
+REF-walk: simple concurrency management with refcounts and spinlocks
+--------------------------------------------------------------------
+
+With all of those divisions carefully classified, we can now start
+looking at the actual process of walking along a path. In particular
+we will start with the handling of the "everything else" part of a
+pathname, and focus on the "REF-walk" approach to concurrency
+management. This code is found in the `link_path_walk()` function, if
+you ignore all the places that only run when "`LOOKUP_RCU`"
+(indicating the use of RCU-walk) is set.
+
+[Meet the Lockers]: https://lwn.net/Articles/453685/
+
+REF-walk is fairly heavy-handed with locks and reference counts. Not
+as heavy-handed as in the old "big kernel lock" days, but certainly not
+afraid of taking a lock when one is needed. It uses a variety of
+different concurrency controls. A background understanding of the
+various primitives is assumed, or can be gleaned from elsewhere such
+as in [Meet the Lockers].
+
+The locking mechanisms used by REF-walk include:
+
+### dentry->d_lockref ###
+
+This uses the lockref primitive to provide both a spinlock and a
+reference count. The special-sauce of this primitive is that the
+conceptual sequence "lock; inc_ref; unlock;" can often be performed
+with a single atomic memory operation.
+
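+A rough illustration of that fast path, using a single 64-bit word and
+C11 atomics rather than the kernel's actual primitives:
+
+```c
+#include <stdatomic.h>
+#include <stdint.h>
+
+/* Sketch only: bit 0 is the "lock", the upper bits are the count. */
+struct lockref_sketch {
+        _Atomic uint64_t lock_count;
+};
+
+#define LR_LOCKED    1ULL
+#define LR_COUNT_ONE (1ULL << 32)
+
+/* "lock; inc_ref; unlock" collapsed into one compare-and-swap. */
+static int lockref_get_fast(struct lockref_sketch *lr)
+{
+        uint64_t old = atomic_load(&lr->lock_count);
+
+        while (!(old & LR_LOCKED)) {
+                if (atomic_compare_exchange_weak(&lr->lock_count, &old,
+                                                 old + LR_COUNT_ONE))
+                        return 1;       /* reference taken without spinning */
+        }
+        return 0;                       /* locked: caller takes the lock instead */
+}
+```
+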
+Holding a reference on a dentry ensures that the dentry won't suddenly
+be freed and used for something else, so the values in various fields
+will behave as expected. It also protects the `->d_inode` reference
+to the inode to some extent.
+
+The association between a dentry and its inode is fairly permanent.
+For example, when a file is renamed, the dentry and inode move
+together to the new location. When a file is created the dentry will
+initially be negative (i.e. `d_inode` is `NULL`), and will be assigned
+to the new inode as part of the act of creation.
+
+When a file is deleted, this can be reflected in the cache either by
+setting `d_inode` to `NULL`, or by removing it from the hash table
+(described shortly) used to look up the name in the parent directory.
+If the dentry is still in use the second option is used as it is
+perfectly legal to keep using an open file after it has been deleted
+and having the dentry around helps. If the dentry is not otherwise in
+use (i.e. if the refcount in `d_lockref` is one), only then will
+`d_inode` be set to `NULL`. Doing it this way is more efficient for a
+very common case.
+
+So as long as a counted reference is held to a dentry, a non-`NULL` `->d_inode`
+value will never be changed.
+
+### dentry->d_lock ###
+
+`d_lock` is a synonym for the spinlock that is part of `d_lockref` above.
+For our purposes, holding this lock protects against the dentry being
+renamed or unlinked. In particular, its parent (`d_parent`), and its
+name (`d_name`) cannot be changed, and it cannot be removed from the
+dentry hash table.
+
+When looking for a name in a directory, REF-walk takes `d_lock` on
+each candidate dentry that it finds in the hash table and then checks
+that the parent and name are correct. So it doesn't lock the parent
+while searching in the cache; it only locks children.
+
+When looking for the parent for a given name (to handle "`..`"),
+REF-walk can take `d_lock` to get a stable reference to `d_parent`,
+but it first tries a more lightweight approach. As seen in
+`dget_parent()`, if a reference can be claimed on the parent, and if
+subsequently `d_parent` can be seen to have not changed, then there is
+no need to actually take the lock on the child.
+
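+That optimistic approach can be sketched with simplified types and C11
+atomics; the real `dget_parent()` is considerably more careful:
+
+```c
+#include <stdatomic.h>
+
+struct dentry_s {
+        struct dentry_s *_Atomic d_parent;
+        _Atomic int refcount;
+};
+
+static int try_get(struct dentry_s *d)
+{
+        int c = atomic_load(&d->refcount);
+
+        while (c > 0)                   /* don't resurrect a dying dentry */
+                if (atomic_compare_exchange_weak(&d->refcount, &c, c + 1))
+                        return 1;
+        return 0;
+}
+
+static void put_ref(struct dentry_s *d)
+{
+        atomic_fetch_sub(&d->refcount, 1);
+}
+
+struct dentry_s *get_parent_sketch(struct dentry_s *child)
+{
+        for (;;) {
+                struct dentry_s *parent = atomic_load(&child->d_parent);
+
+                if (!try_get(parent))
+                        continue;
+                if (atomic_load(&child->d_parent) == parent)
+                        return parent;  /* unchanged, so the reference is good */
+                put_ref(parent);        /* raced with a rename: try again */
+        }
+}
+```
+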
+### rename_lock ###
+
+Looking up a given name in a given directory involves computing a hash
+from the two values (the name and the dentry of the directory),
+accessing that slot in a hash table, and searching the linked list
+that is found there.
+
+When a dentry is renamed, the name and the parent dentry can both
+change so the hash will almost certainly change too. This would move the
+dentry to a different chain in the hash table. If a filename search
+happened to be looking at a dentry that was moved in this way,
+it might end up continuing the search down the wrong chain,
+and so miss out on part of the correct chain.
+
+The name-lookup process (`d_lookup()`) does _not_ try to prevent this
+from happening, but only to detect when it happens.
+`rename_lock` is a seqlock that is updated whenever any dentry is
+renamed. If `d_lookup` finds that a rename happened while it
+unsuccessfully scanned a chain in the hash table, it simply tries
+again.
+
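+The reader side of that pattern, sketched with a plain atomic counter
+standing in for the seqlock (the helper names here are invented):
+
+```c
+#include <stdatomic.h>
+
+extern _Atomic unsigned rename_seq;     /* bumped to odd/even around renames */
+
+struct dentry_s;
+struct dentry_s *scan_hash_chain(const char *name);     /* assumed helper */
+
+static unsigned seq_begin(void)
+{
+        unsigned seq;
+
+        while ((seq = atomic_load(&rename_seq)) & 1)
+                ;                       /* odd: a rename is in flight, wait */
+        return seq;
+}
+
+static int seq_retry(unsigned seq)
+{
+        return atomic_load(&rename_seq) != seq;
+}
+
+struct dentry_s *d_lookup_sketch(const char *name)
+{
+        struct dentry_s *found;
+        unsigned seq;
+
+        do {
+                seq = seq_begin();
+                found = scan_hash_chain(name);
+        } while (!found && seq_retry(seq));     /* a rename may have hidden it */
+
+        return found;
+}
+```
+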
+### inode->i_mutex ###
+
+`i_mutex` is a mutex that serializes all changes to a particular
+directory. This ensures that, for example, an `unlink()` and a `rename()`
+cannot both happen at the same time. It also keeps the directory
+stable while the filesystem is asked to look up a name that is not
+currently in the dcache.
+
+This has a complementary role to that of `d_lock`: `i_mutex` on a
+directory protects all of the names in that directory, while `d_lock`
+on a name protects just one name in a directory. Most changes to the
+dcache hold `i_mutex` on the relevant directory inode and briefly take
+`d_lock` on one or more of the dentries while the change happens. One
+exception is when idle dentries are removed from the dcache due to
+memory pressure. This uses `d_lock`, but `i_mutex` plays no role.
+
+The mutex affects pathname lookup in two distinct ways. Firstly it
+serializes lookup of a name in a directory. `walk_component()` uses
+`lookup_fast()` first which, in turn, checks to see if the name is in the cache,
+using only `d_lock` locking. If the name isn't found, then `walk_component()`
+falls back to `lookup_slow()` which takes `i_mutex`, checks again that
+the name isn't in the cache, and then calls in to the filesystem to get a
+definitive answer. A new dentry will be added to the cache regardless of
+the result.
+
+Secondly, when pathname lookup reaches the final component, it will
+sometimes need to take `i_mutex` before performing the last lookup so
+that the required exclusion can be achieved. How path lookup chooses
+to take, or not take, `i_mutex` is one of the
+issues addressed in a subsequent section.
+
+### mnt->mnt_count ###
+
+`mnt_count` is a per-CPU reference counter on "`mount`" structures.
+Per-CPU here means that incrementing the count is cheap as it only
+uses CPU-local memory, but checking if the count is zero is expensive as
+it needs to check with every CPU. Taking a `mnt_count` reference
+prevents the mount structure from disappearing as the result of regular
+unmount operations, but does not prevent a "lazy" unmount. So holding
+`mnt_count` doesn't ensure that the mount remains in the namespace and,
+in particular, doesn't stabilize the link to the mounted-on dentry. It
+does, however, ensure that the `mount` data structure remains coherent,
+and it provides a reference to the root dentry of the mounted
+filesystem. So a reference through `->mnt_count` provides a stable
+reference to the mounted dentry, but not the mounted-on dentry.
+
+### mount_lock ###
+
+`mount_lock` is a global seqlock, a bit like `rename_lock`. It can be used to
+check if any change has been made to any mount points.
+
+While walking down the tree (away from the root) this lock is used when
+crossing a mount point to check that the crossing was safe. That is,
+the value in the seqlock is read, then the code finds the mount that
+is mounted on the current directory, if there is one, and increments
+the `mnt_count`. Finally the value in `mount_lock` is checked against
+the old value. If there is no change, then the crossing was safe. If there
+was a change, the `mnt_count` is decremented and the whole process is
+retried.
+
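+Ignoring many details, that crossing looks roughly like the sketch
+below, where `find_mounted()`, `mnt_get()`, and `mnt_put_ref()` are
+hypothetical stand-ins for the mount-hash lookup and the per-CPU
+count manipulation:
+
+    unsigned seq;
+    struct mount *m;
+
+    for (;;) {
+            seq = read_seqbegin(&mount_lock);
+            m = find_mounted(path);         /* what, if anything, is mounted here? */
+            if (m)
+                    mnt_get(m);             /* cheap per-CPU increment */
+            if (!read_seqretry(&mount_lock, seq))
+                    break;                  /* nothing changed: crossing was safe */
+            if (m)
+                    mnt_put_ref(m);         /* something changed: undo and retry */
+    }
+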
+When walking up the tree (towards the root) by following a ".." link,
+a little more care is needed. In this case the seqlock (which
+contains both a counter and a spinlock) is fully locked to prevent
+any changes to any mount points while stepping up. This locking is
+needed to stabilize the link to the mounted-on dentry, which the
+refcount on the mount itself doesn't ensure.
+
+### RCU ###
+
+Finally the global (but extremely lightweight) RCU read lock is held
+from time to time to ensure certain data structures don't get freed
+unexpectedly.
+
+In particular it is held while scanning chains in the dcache hash
+table, and the mount point hash table.
+
+Bringing it together with `struct nameidata`
+--------------------------------------------
+
+[First edition Unix]: http://minnie.tuhs.org/cgi-bin/utree.pl?file=V1/u2.s
+
+Throughout the process of walking a path, the current status is stored
+in a `struct nameidata`, "namei" being the traditional name - dating
+all the way back to [First Edition Unix] - of the function that
+converts a "name" to an "inode". `struct nameidata` contains (among
+other fields):
+
+### `struct path path` ###
+
+A `path` contains a `struct vfsmount` (which is
+embedded in a `struct mount`) and a `struct dentry`. Together these
+record the current status of the walk. They start out referring to the
+starting point (the current working directory, the root directory, or some other
+directory identified by a file descriptor), and are updated on each
+step. A reference through `d_lockref` and `mnt_count` is always
+held.
+
+### `struct qstr last` ###
+
+This is a string together with a length (i.e. _not_ `nul` terminated)
+that is the "next" component in the pathname.
+
+### `int last_type` ###
+
+This is one of `LAST_NORM`, `LAST_ROOT`, `LAST_DOT`, `LAST_DOTDOT`, or
+`LAST_BIND`. The `last` field is only valid if the type is
+`LAST_NORM`. `LAST_BIND` is used when following a symlink and no
+components of the symlink have been processed yet. Others should be
+fairly self-explanatory.
+
+### `struct path root` ###
+
+This is used to hold a reference to the effective root of the
+filesystem. Often that reference won't be needed, so this field is
+only assigned the first time it is used, or when a non-standard root
+is requested. Keeping a reference in the `nameidata` ensures that
+only one root is in effect for the entire path walk, even if it races
+with a `chroot()` system call.
+
+The root is needed when either of two conditions holds: (1) either the
+pathname or a symbolic link starts with a "`/`", or (2) a "`..`"
+component is being handled, since "`..`" from the root must always stay
+at the root. The value used is usually the current root directory of
+the calling process. An alternate root can be provided as when
+`sysctl()` calls `file_open_root()`, and when NFSv4 or Btrfs call
+`mount_subtree()`. In each case a pathname is being looked up in a very
+specific part of the filesystem, and the lookup must not be allowed to
+escape that subtree. It works a bit like a local `chroot()`.
+
+Ignoring the handling of symbolic links, we can now describe the
+"`link_path_walk()`" function, which handles the lookup of everything
+except the final component as:
+
+> Given a path (`name`) and a nameidata structure (`nd`), check that the
+> current directory has execute permission and then advance `name`
+> over one component while updating `last_type` and `last`. If that
+> was the final component, then return, otherwise call
+> `walk_component()` and repeat from the top.
+
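+In heavily condensed and somewhat approximate form, that description
+corresponds to a loop of roughly this shape, where `parse_component()`
+is a hypothetical helper standing in for the component-parsing code at
+the top of `link_path_walk()` and the flag handling is simplified:
+
+    for (;;) {
+            err = may_lookup(nd);           /* exec permission on the current directory */
+            if (err)
+                    return err;
+            name = parse_component(nd, name);   /* sets nd->last and nd->last_type */
+            if (!*name)
+                    return 0;               /* final component: leave it for the caller */
+            err = walk_component(nd, WALK_GET);
+            if (err)
+                    return err;
+    }
+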
+`walk_component()` is even easier. If the component is `LAST_DOTS`,
+it calls `handle_dots()` which does the necessary locking as already
+described. If it finds a `LAST_NORM` component it first calls
+"`lookup_fast()`" which only looks in the dcache, but will ask the
+filesystem to revalidate the result if it is that sort of filesystem.
+If that doesn't get a good result, it calls "`lookup_slow()`" which
+takes the `i_mutex`, rechecks the cache, and then asks the filesystem
+to find a definitive answer. Each of these will call
+`follow_managed()` (as described below) to handle any mount points.
+
+In the absence of symbolic links, `walk_component()` creates a new
+`struct path` containing a counted reference to the new dentry and a
+reference to the new `vfsmount` which is only counted if it is
+different from the previous `vfsmount`. It then calls
+`path_to_nameidata()` to install the new `struct path` in the
+`struct nameidata` and drop the unneeded references.
+
+This "hand-over-hand" sequencing of getting a reference to the new
+dentry before dropping the reference to the previous dentry may
+seem obvious, but is worth pointing out so that we will recognize its
+analogue in the "RCU-walk" version.
+
+Handling the final component
+-----------------------------
+
+`link_path_walk()` only walks as far as setting `nd->last` and
+`nd->last_type` to refer to the final component of the path. It does
+not call `walk_component()` that last time. Handling that final
+component remains for the caller to sort out. Those callers are
+`path_lookupat()`, `path_parentat()`, `path_mountpoint()` and
+`path_openat()` each of which handles the differing requirements of
+different system calls.
+
+`path_parentat()` is clearly the simplest - it just wraps a little bit
+of housekeeping around `link_path_walk()` and returns the parent
+directory and final component to the caller. The caller will be either
+aiming to create a name (via `filename_create()`) or remove or rename
+a name (in which case `user_path_parent()` is used). They will use
+`i_mutex` to exclude other changes while they validate and then
+perform their operation.
+
+`path_lookupat()` is nearly as simple - it is used when an existing
+object is wanted such as by `stat()` or `chmod()`. It essentially just
+calls `walk_component()` on the final component through a call to
+`lookup_last()`. `path_lookupat()` returns just the final dentry.
+
+`path_mountpoint()` handles the special case of unmounting which must
+not try to revalidate the mounted filesystem. It effectively
+contains, through a call to `mountpoint_last()`, an alternate
+implementation of `lookup_slow()` which skips that step. This is
+important when unmounting a filesystem that is inaccessible, such as
+one provided by a dead NFS server.
+
+Finally `path_openat()` is used for the `open()` system call; it
+contains, in support functions starting with "`do_last()`", all the
+complexity needed to handle the different subtleties of O_CREAT (with
+or without O_EXCL), final "`/`" characters, and trailing symbolic
+links. We will revisit this in the final part of this series, which
+focuses on those symbolic links. "`do_last()`" will sometimes, but
+not always, take `i_mutex`, depending on what it finds.
+
+Each of these, or the functions which call them, need to be alert to
+the possibility that the final component is not `LAST_NORM`. If the
+goal of the lookup is to create something, then any value for
+`last_type` other than `LAST_NORM` will result in an error. For
+example if `path_parentat()` reports `LAST_DOTDOT`, then the caller
+won't try to create that name. They also check for trailing slashes
+by testing `last.name[last.len]`. If there is any character beyond
+the final component, it must be a trailing slash.
+
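+As an illustration (the exact flags set vary between callers), such a
+check might look like:
+
+    /* detect a trailing slash after the final component */
+    if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
+            nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+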
+Revalidation and automounts
+---------------------------
+
+Apart from symbolic links, there are only two parts of the "REF-walk"
+process not yet covered. One is the handling of stale cache entries
+and the other is automounts.
+
+On filesystems that require it, the lookup routines will call the
+`->d_revalidate()` dentry method to ensure that the cached information
+is current. This will often confirm validity or update a few details
+from a server. In some cases it may find that there has been change
+further up the path and that something that was thought to be valid
+previously isn't really. When this happens the lookup of the whole
+path is aborted and retried with the "`LOOKUP_REVAL`" flag set. This
+forces revalidation to be more thorough. We will see more details of
+this retry process in the next article.
+
+Automount points are locations in the filesystem where an attempt to
+lookup a name can trigger changes to how that lookup should be
+handled, in particular by mounting a filesystem there. These are
+covered in greater detail in autofs4.txt in the Linux documentation
+tree, but a few notes specifically related to path lookup are in order
+here.
+
+The Linux VFS has a concept of "managed" dentries which is reflected
+in function names such as "`follow_managed()`". There are three
+potentially interesting things about these dentries corresponding
+to three different flags that might be set in `dentry->d_flags`:
+
+### `DCACHE_MANAGE_TRANSIT` ###
+
+If this flag has been set, then the filesystem has requested that the
+`d_manage()` dentry operation be called before handling any possible
+mount point. This can perform two particular services:
+
+It can block to avoid races. If an automount point is being
+unmounted, the `d_manage()` function will usually wait for that
+process to complete before letting the new lookup proceed and possibly
+trigger a new automount.
+
+It can selectively allow only some processes to transit through a
+mount point. When a server process is managing automounts, it may
+need to access a directory without triggering normal automount
+processing. That server process can identify itself to the `autofs`
+filesystem, which will then give it a special pass through
+`d_manage()` by returning `-EISDIR`.
+
+### `DCACHE_MOUNTED` ###
+
+This flag is set on every dentry that is mounted on. As Linux
+supports multiple filesystem namespaces, it is possible that the
+dentry may not be mounted on in *this* namespace, just in some
+other. So this flag is seen as a hint, not a promise.
+
+If this flag is set, and `d_manage()` didn't return `-EISDIR`,
+`lookup_mnt()` is called to examine the mount hash table (honoring the
+`mount_lock` described earlier) and possibly return a new `vfsmount`
+and a new `dentry` (both with counted references).
+
+### `DCACHE_NEED_AUTOMOUNT` ###
+
+If `d_manage()` allowed us to get this far, and `lookup_mnt()` didn't
+find a mount point, then this flag causes the `d_automount()` dentry
+operation to be called.
+
+The `d_automount()` operation can be arbitrarily complex and may
+communicate with server processes etc. but it should ultimately either
+report that there was an error, that there was nothing to mount, or
+should provide an updated `struct path` with new `dentry` and `vfsmount`.
+
+In the latter case, `finish_automount()` will be called to safely
+install the new mount point into the mount table.
+
+There is no new locking of import here and it is important that no
+locks (only counted references) are held over this processing due to
+the very real possibility of extended delays.
+This will become more important next time when we examine RCU-walk
+which is particularly sensitive to delays.
+
+RCU-walk - faster pathname lookup in Linux
+==========================================
+
+RCU-walk is another algorithm for performing pathname lookup in Linux.
+It is in many ways similar to REF-walk and the two share quite a bit
+of code. The significant difference in RCU-walk is how it allows for
+the possibility of concurrent access.
+
+We noted that REF-walk is complex because there are numerous details
+and special cases. RCU-walk reduces this complexity by simply
+refusing to handle a number of cases -- it instead falls back to
+REF-walk. The difficulty with RCU-walk comes from a different
+direction: unfamiliarity. The locking rules when depending on RCU are
+quite different from traditional locking, so we will spend a little extra
+time when we come to those.
+
+Clear demarcation of roles
+--------------------------
+
+The easiest way to manage concurrency is to forcibly stop any other
+thread from changing the data structures that a given thread is
+looking at. In cases where no other thread would even think of
+changing the data and lots of different threads want to read at the
+same time, this can be very costly. Even when using locks that permit
+multiple concurrent readers, the simple act of updating the count of
+the number of current readers can impose an unwanted cost. So the
+goal when reading a shared data structure that no other process is
+changing is to avoid writing anything to memory at all. Take no
+locks, increment no counts, leave no footprints.
+
+The REF-walk mechanism already described certainly doesn't follow this
+principle, but then it is really designed to work when there may well
+be other threads modifying the data. RCU-walk, in contrast, is
+designed for the common situation where there are lots of frequent
+readers and only occasional writers. This may not be common in all
+parts of the filesystem tree, but in many parts it will be. For the
+other parts it is important that RCU-walk can quickly fall back to
+using REF-walk.
+
+Pathname lookup always starts in RCU-walk mode but only remains there
+as long as what it is looking for is in the cache and is stable. It
+dances lightly down the cached filesystem image, leaving no footprints
+and carefully watching where it is, to be sure it doesn't trip. If it
+notices that something has changed or is changing, or if something
+isn't in the cache, then it tries to stop gracefully and switch to
+REF-walk.
+
+This stopping requires getting a counted reference on the current
+`vfsmount` and `dentry`, and ensuring that these are still valid -
+that a path walk with REF-walk would have found the same entries.
+This is an invariant that RCU-walk must guarantee. It can only make
+decisions, such as selecting the next step, that are decisions which
+REF-walk could also have made if it were walking down the tree at the
+same time. If the graceful stop succeeds, the rest of the path is
+processed with the reliable, if slightly sluggish, REF-walk. If
+RCU-walk finds it cannot stop gracefully, it simply gives up and
+restarts from the top with REF-walk.
+
+This pattern of "try RCU-walk, if that fails try REF-walk" can be
+clearly seen in functions like `filename_lookup()`,
+`filename_parentat()`, `filename_mountpoint()`,
+`do_filp_open()`, and `do_file_open_root()`. These five
+correspond roughly to the four `path_*` functions we met earlier,
+each of which calls `link_path_walk()`. The `path_*` functions are
+called using different mode flags until a mode is found which works.
+They are first called with `LOOKUP_RCU` set to request "RCU-walk". If
+that fails with the error `ECHILD` they are called again with no
+special flag to request "REF-walk". If either of those report the
+error `ESTALE` a final attempt is made with `LOOKUP_REVAL` set (and no
+`LOOKUP_RCU`) to ensure that entries found in the cache are forcibly
+revalidated - normally entries are only revalidated if the filesystem
+determines that they are too old to trust.
+
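+The overall shape of that retry ladder can be sketched as below, where
+`do_one_lookup()` is a hypothetical stand-in for whichever `path_*`
+function is being used:
+
+    err = do_one_lookup(nd, flags | LOOKUP_RCU);    /* RCU-walk */
+    if (err == -ECHILD)
+            err = do_one_lookup(nd, flags);         /* REF-walk */
+    if (err == -ESTALE)
+            err = do_one_lookup(nd, flags | LOOKUP_REVAL);  /* forced revalidation */
+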
+The `LOOKUP_RCU` attempt may drop that flag internally and switch to
+REF-walk, but will never then try to switch back to RCU-walk. Places
+that trip up RCU-walk are much more likely to be near the leaves and
+so it is very unlikely that there will be much, if any, benefit from
+switching back.
+
+RCU and seqlocks: fast and light
+--------------------------------
+
+RCU is, unsurprisingly, critical to RCU-walk mode. The
+`rcu_read_lock()` is held for the entire time that RCU-walk is walking
+down a path. The particular guarantee it provides is that the key
+data structures - dentries, inodes, super_blocks, and mounts - will
+not be freed while the lock is held. They might be unlinked or
+invalidated in one way or another, but the memory will not be
+repurposed so values in various fields will still be meaningful. This
+is the only guarantee that RCU provides; everything else is done using
+seqlocks.
+
+As we saw above, REF-walk holds a counted reference to the current
+dentry and the current vfsmount, and does not release those references
+before taking references to the "next" dentry or vfsmount. It also
+sometimes takes the `d_lock` spinlock. These references and locks are
+taken to prevent certain changes from happening. RCU-walk must not
+take those references or locks and so cannot prevent such changes.
+Instead, it checks to see if a change has been made, and aborts or
+retries if it has.
+
+To preserve the invariant mentioned above (that RCU-walk may only make
+decisions that REF-walk could have made), it must make the checks at
+or near the same places that REF-walk holds the references. So, when
+REF-walk increments a reference count or takes a spinlock, RCU-walk
+samples the status of a seqlock using `read_seqcount_begin()` or a
+similar function. When REF-walk decrements the count or drops the
+lock, RCU-walk checks if the sampled status is still valid using
+`read_seqcount_retry()` or similar.
+
+However, there is a little bit more to seqlocks than that. If
+RCU-walk accesses two different fields in a seqlock-protected
+structure, or accesses the same field twice, there is no a priori
+guarantee of any consistency between those accesses. When consistency
+is needed - which it usually is - RCU-walk must take a copy and then
+use `read_seqcount_retry()` to validate that copy.
+
+`read_seqcount_retry()` not only checks the sequence number, but also
+imposes a memory barrier so that no memory-read instruction from
+*before* the call can be delayed until *after* the call, either by the
+CPU or by the compiler. A simple example of this can be seen in
+`slow_dentry_cmp()` which, for filesystems which do not use simple
+byte-wise name equality, calls into the filesystem to compare a name
+against a dentry. The length and name pointer are copied into local
+variables, then `read_seqcount_retry()` is called to confirm the two
+are consistent, and only then is `->d_compare()` called. When
+standard filename comparison is used, `dentry_cmp()` is called
+instead. Notably it does _not_ use `read_seqcount_retry()`, but
+instead has a large comment explaining why the consistency guarantee
+isn't necessary. A subsequent `read_seqcount_retry()` will be
+sufficient to catch any problem that could occur at this point.
+
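+Stripped of detail, that copy-then-validate pattern looks like the
+sketch below; `compare_with_fs()` is a hypothetical abbreviation of
+the eventual `->d_compare()` call:
+
+    const unsigned char *str = READ_ONCE(dentry->d_name.name);
+    u32 len = READ_ONCE(dentry->d_name.len);
+
+    /* the copies may only be trusted if the dentry did not change */
+    if (read_seqcount_retry(&dentry->d_seq, seq))
+            return -ECHILD;
+    return compare_with_fs(parent, dentry, len, str, name);
+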
+With that little refresher on seqlocks out of the way we can look at
+the bigger picture of how RCU-walk uses seqlocks.
+
+### `mount_lock` and `nd->m_seq` ###
+
+We already met the `mount_lock` seqlock when REF-walk used it to
+ensure that crossing a mount point is performed safely. RCU-walk uses
+it for that too, but for quite a bit more.
+
+Instead of taking a counted reference to each `vfsmount` as it
+descends the tree, RCU-walk samples the state of `mount_lock` at the
+start of the walk and stores this initial sequence number in the
+`struct nameidata` in the `m_seq` field. This one lock and one
+sequence number are used to validate all accesses to all `vfsmounts`,
+and all mount point crossings. As changes to the mount table are
+relatively rare, it is reasonable to fall back on REF-walk any time
+that any "mount" or "unmount" happens.
+
+`m_seq` is checked (using `read_seqretry()`) at the end of an RCU-walk
+sequence, whether switching to REF-walk for the rest of the path or
+when the end of the path is reached. It is also checked when stepping
+down over a mount point (in `__follow_mount_rcu()`) or up (in
+`follow_dotdot_rcu()`). If it is ever found to have changed, the
+whole RCU-walk sequence is aborted and the path is processed again by
+REF-walk.
+
+If RCU-walk finds that `mount_lock` hasn't changed then it can be sure
+that, had REF-walk taken counted references on each vfsmount, the
+results would have been the same. This ensures the invariant holds,
+at least for vfsmount structures.
+
+### `dentry->d_seq` and `nd->seq` ###
+
+In place of taking a count or lock on `d_lockref`, RCU-walk samples
+the per-dentry `d_seq` seqlock, and stores the sequence number in the
+`seq` field of the nameidata structure, so `nd->seq` should always be
+the current sequence number of `nd->dentry`. This number needs to be
+revalidated after copying, and before using, the name, parent, or
+inode of the dentry.
+
+The handling of the name we have already looked at, and the parent is
+only accessed in `follow_dotdot_rcu()` which fairly trivially follows
+the required pattern, though it does so for three different cases.
+
+When not at a mount point, `d_parent` is followed and its `d_seq` is
+collected. When we are at a mount point, we instead follow the
+`mnt->mnt_mountpoint` link to get a new dentry and collect its
+`d_seq`. Then, after finally finding a `d_parent` to follow, we must
+check if we have landed on a mount point and, if so, must find that
+mount point and follow the `mnt->mnt_root` link. This would imply a
+somewhat unusual, but certainly possible, circumstance where the
+starting point of the path lookup was in part of the filesystem that
+was mounted on, and so not visible from the root.
+
+The inode pointer, stored in `->d_inode`, is a little more
+interesting. The inode will always need to be accessed at least
+twice, once to determine if it is NULL and once to verify access
+permissions. Symlink handling requires a validated inode pointer too.
+Rather than revalidating on each access, a copy is made on the first
+access and it is stored in the `inode` field of `nameidata` from where
+it can be safely accessed without further validation.
+
+`lookup_fast()` is the only lookup routine that is used in RCU-mode,
+`lookup_slow()` being too slow and requiring locks. It is in
+`lookup_fast()` that we find the important "hand over hand" tracking
+of the current dentry.
+
+The current `dentry` and current `seq` number are passed to
+`__d_lookup_rcu()` which, on success, returns a new `dentry` and a
+new `seq` number. `lookup_fast()` then copies the inode pointer and
+revalidates the new `seq` number. It then validates the old `dentry`
+with the old `seq` number one last time and only then continues. This
+process of getting the `seq` number of the new dentry and then
+checking the `seq` number of the old exactly mirrors the process of
+getting a counted reference to the new dentry before dropping that for
+the old dentry which we saw in REF-walk.
+
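+In outline, with error handling and many details removed, that
+sequence looks something like:
+
+    /* illustrative outline of lookup_fast() in RCU-walk mode */
+    parent = nd->path.dentry;
+    dentry = __d_lookup_rcu(parent, &nd->last, &seq);   /* new dentry, new seq */
+    if (!dentry)
+            return -ECHILD;                 /* not cached: drop to REF-walk */
+    inode = READ_ONCE(dentry->d_inode);     /* copy before validating */
+    if (read_seqcount_retry(&dentry->d_seq, seq))
+            return -ECHILD;                 /* the new dentry changed */
+    if (read_seqcount_retry(&parent->d_seq, nd->seq))
+            return -ECHILD;                 /* the old dentry changed */
+    nd->seq = seq;                          /* step forward, hand over hand */
+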
+### No `inode->i_mutex` or even `rename_lock` ###
+
+A mutex is a fairly heavyweight lock that can only be taken when it is
+permissible to sleep. As `rcu_read_lock()` forbids sleeping,
+`inode->i_mutex` plays no role in RCU-walk. If some other thread does
+take `i_mutex` and modifies the directory in a way that RCU-walk needs
+to notice, the result will be either that RCU-walk fails to find the
+dentry that it is looking for, or it will find a dentry which
+`read_seqretry()` won't validate. In either case it will drop down to
+REF-walk mode which can take whatever locks are needed.
+
+Though `rename_lock` could be used by RCU-walk as it doesn't require
+any sleeping, RCU-walk doesn't bother. REF-walk uses `rename_lock` to
+protect against the possibility of hash chains in the dcache changing
+while they are being searched. This can result in failing to find
+something that actually is there. When RCU-walk fails to find
+something in the dentry cache, whether it is really there or not, it
+already drops down to REF-walk and tries again with appropriate
+locking. This neatly handles all cases, so adding extra checks on
+rename_lock would bring no significant value.
+
+`unlazy_walk()` and `complete_walk()`
+-------------------------------------
+
+That "dropping down to REF-walk" typically involves a call to
+`unlazy_walk()`, so named because "RCU-walk" is also sometimes
+referred to as "lazy walk". `unlazy_walk()` is called when
+following the path down to the current vfsmount/dentry pair seems to
+have proceeded successfully, but the next step is problematic. This
+can happen if the next name cannot be found in the dcache, if
+permission checking or name revalidation couldn't be achieved while
+the `rcu_read_lock()` is held (which forbids sleeping), if an
+automount point is found, or in a couple of cases involving symlinks.
+It is also called from `complete_walk()` when the lookup has reached
+the final component, or the very end of the path, depending on which
+particular flavor of lookup is used.
+
+Other reasons for dropping out of RCU-walk that do not trigger a call
+to `unlazy_walk()` are when some inconsistency is found that cannot be
+handled immediately, such as `mount_lock` or one of the `d_seq`
+seqlocks reporting a change. In these cases the relevant function
+will return `-ECHILD` which will percolate up until it triggers a new
+attempt from the top using REF-walk.
+
+For those cases where `unlazy_walk()` is an option, it essentially
+takes a reference on each of the pointers that it holds (vfsmount,
+dentry, and possibly some symbolic links) and then verifies that the
+relevant seqlocks have not been changed. If there have been changes,
+it, too, aborts with `-ECHILD`, otherwise the transition to REF-walk
+has been a success and the lookup process continues.
+
+Taking a reference on those pointers is not quite as simple as just
+incrementing a counter. That works to take a second reference if you
+already have one (often indirectly through another object), but it
+isn't sufficient if you don't actually have a counted reference at
+all. For `dentry->d_lockref`, it is safe to increment the reference
+counter to get a reference unless it has been explicitly marked as
+"dead" which involves setting the counter to `-128`.
+`lockref_get_not_dead()` achieves this.
+
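+For example (illustrative):
+
+    /* a "dead" dentry (count == -128) cannot be resurrected */
+    if (!lockref_get_not_dead(&dentry->d_lockref))
+            return -ECHILD;         /* abandon RCU-walk and start again */
+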
+For `mnt->mnt_count` it is safe to take a reference as long as
+`mount_lock` is then used to validate the reference. If that
+validation fails, it may *not* be safe to just drop that reference in
+the standard way of calling `mnt_put()` - an unmount may have
+progressed too far. So the code in `legitimize_mnt()`, when it
+finds that the reference it got might not be safe, checks the
+`MNT_SYNC_UMOUNT` flag to determine if a simple `mnt_put()` is
+correct, or if it should just decrement the count and pretend none of
+this ever happened.
+
+Taking care in filesystems
+---------------------------
+
+RCU-walk depends almost entirely on cached information and often will
+not call into the filesystem at all. However there are two places,
+besides the already-mentioned component-name comparison, where the
+file system might be included in RCU-walk, and it must know to be
+careful.
+
+If the filesystem has non-standard permission-checking requirements -
+such as a networked filesystem which may need to check with the server
+- the `i_op->permission` interface might be called during RCU-walk.
+In this case an extra "`MAY_NOT_BLOCK`" flag is passed so that it
+knows not to sleep, but to return `-ECHILD` if it cannot complete
+promptly. `i_op->permission` is given the inode pointer, not the
+dentry, so it doesn't need to worry about further consistency checks.
+However if it accesses any other filesystem data structures, it must
+ensure they are safe to be accessed with only the `rcu_read_lock()`
+held. This typically means they must be freed using `kfree_rcu()` or
+similar.
+
+[`READ_ONCE()`]: https://lwn.net/Articles/624126/
+
+If the filesystem may need to revalidate dcache entries, then
+`d_op->d_revalidate` may be called in RCU-walk too. This interface
+*is* passed the dentry but does not have access to the `inode` or the
+`seq` number from the `nameidata`, so it needs to be extra careful
+when accessing fields in the dentry. This "extra care" typically
+involves using `ACCESS_ONCE()` or the newer [`READ_ONCE()`] to access
+fields, and verifying the result is not NULL before using it. This
+pattern can be seen in `nfs_lookup_revalidate()`.
+
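+A minimal sketch of that kind of care, assuming nothing beyond what is
+described above:
+
+    /* careful field access in a ->d_revalidate() that may run in RCU-walk */
+    if (flags & LOOKUP_RCU) {
+            struct dentry *parent = READ_ONCE(dentry->d_parent);
+            struct inode *dir = READ_ONCE(parent->d_inode);
+
+            if (!dir)
+                    return -ECHILD; /* cannot proceed safely without sleeping */
+            /* ... only checks that do not sleep ... */
+    }
+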
+A pair of patterns
+------------------
+
+In various places in the details of REF-walk and RCU-walk, and also in
+the big picture, there are a couple of related patterns that are worth
+being aware of.
+
+The first is "try quickly and check, if that fails try slowly". We
+can see that in the high-level approach of first trying RCU-walk and
+then trying REF-walk, and in places where `unlazy_walk()` is used to
+switch to REF-walk for the rest of the path. We also saw it earlier
+in `dget_parent()` when following a "`..`" link. It tries a quick way
+to get a reference, then falls back to taking locks if needed.
+
+The second pattern is "try quickly and check, if that fails try
+again - repeatedly". This is seen with the use of `rename_lock` and
+`mount_lock` in REF-walk. RCU-walk doesn't make use of this pattern -
+if anything goes wrong it is much safer to just abort and try a more
+sedate approach.
+
+The emphasis here is "try quickly and check". It should probably be
+"try quickly _and carefully,_ then check". The fact that checking is
+needed is a reminder that the system is dynamic and only a limited
+number of things are safe at all. The most likely cause of errors in
+this whole process is assuming something is safe when in reality it
+isn't. Careful consideration of what exactly guarantees the safety of
+each access is sometimes necessary.
+
+A walk among the symlinks
+=========================
+
+There are several basic issues that we will examine to understand the
+handling of symbolic links: the symlink stack, together with cache
+lifetimes, will help us understand the overall recursive handling of
+symlinks and lead to the special care needed for the final component.
+Then a consideration of access-time updates and summary of the various
+flags controlling lookup will finish the story.
+
+The symlink stack
+-----------------
+
+There are only two sorts of filesystem objects that can usefully
+appear in a path prior to the final component: directories and symlinks.
+Handling directories is quite straightforward: the new directory
+simply becomes the starting point at which to interpret the next
+component on the path. Handling symbolic links requires a bit more
+work.
+
+Conceptually, symbolic links could be handled by editing the path. If
+a component name refers to a symbolic link, then that component is
+replaced by the body of the link and, if that body starts with a '/',
+then all preceding parts of the path are discarded. This is what the
+"`readlink -f`" command does, though it also edits out "`.`" and
+"`..`" components.
+
+Directly editing the path string is not really necessary when looking
+up a path, and discarding early components is pointless as they aren't
+looked at anyway. Keeping track of all remaining components is
+important, but they can of course be kept separately; there is no need
+to concatenate them. As one symlink may easily refer to another,
+which in turn can refer to a third, we may need to keep the remaining
+components of several paths, each to be processed when the preceding
+ones are completed. These path remnants are kept on a stack of
+limited size.
+
+There are two reasons for placing limits on how many symlinks can
+occur in a single path lookup. The most obvious is to avoid loops.
+If a symlink referred to itself either directly or through
+intermediaries, then following the symlink can never complete
+successfully - the error `ELOOP` must be returned. Loops can be
+detected without imposing limits, but limits are the simplest solution
+and, given the second reason for restriction, quite sufficient.
+
+[outlined recently]: http://thread.gmane.org/gmane.linux.kernel/1934390/focus=1934550
+
+The second reason was [outlined recently] by Linus:
+
+> Because it's a latency and DoS issue too. We need to react well to
+> true loops, but also to "very deep" non-loops. It's not about memory
+> use, it's about users triggering unreasonable CPU resources.
+
+Linux imposes a limit on the length of any pathname: `PATH_MAX`, which
+is 4096. There are a number of reasons for this limit; not letting the
+kernel spend too much time on just one path is one of them. With
+symbolic links you can effectively generate much longer paths so some
+sort of limit is needed for the same reason. Linux imposes a limit of
+at most 40 symlinks in any one path lookup. It previously imposed a
+further limit of eight on the maximum depth of recursion, but that was
+raised to 40 when a separate stack was implemented, so there is now
+just the one limit.
+
+The `nameidata` structure that we met in an earlier article contains a
+small stack that can be used to store the remaining part of up to two
+symlinks. In many cases this will be sufficient. If it isn't, a
+separate stack is allocated with room for 40 symlinks. Pathname
+lookup will never exceed that stack as, once the 40th symlink is
+detected, an error is returned.
+
+It might seem that the name remnants are all that needs to be stored on
+this stack, but we need a bit more. To see that, we need to move on to
+cache lifetimes.
+
+Storage and lifetime of cached symlinks
+---------------------------------------
+
+Like other filesystem resources, such as inodes and directory
+entries, symlinks are cached by Linux to avoid repeated costly access
+to external storage. It is particularly important for RCU-walk to be
+able to find and temporarily hold onto these cached entries, so that
+it doesn't need to drop down into REF-walk.
+
+[object-oriented design pattern]: https://lwn.net/Articles/446317/
+
+While each filesystem is free to make its own choice, symlinks are
+typically stored in one of two places. Short symlinks are often
+stored directly in the inode. When a filesystem allocates a `struct
+inode` it typically allocates extra space to store private data (a
+common [object-oriented design pattern] in the kernel). This will
+sometimes include space for a symlink. The other common location is
+in the page cache, which normally stores the content of files. The
+pathname in a symlink can be seen as the content of that symlink and
+can easily be stored in the page cache just like file content.
+
+When neither of these is suitable, the next most likely scenario is
+that the filesystem will allocate some temporary memory and copy or
+construct the symlink content into that memory whenever it is needed.
+
+When the symlink is stored in the inode, it has the same lifetime as
+the inode which, itself, is protected by RCU or by a counted reference
+on the dentry. This means that the mechanisms that pathname lookup
+uses to access the dcache and icache (inode cache) safely are quite
+sufficient for accessing some cached symlinks safely. In these cases,
+the `i_link` pointer in the inode is set to point to wherever the
+symlink is stored and it can be accessed directly whenever needed.
+
+When the symlink is stored in the page cache or elsewhere, the
+situation is not so straightforward. A reference on a dentry or even
+on an inode does not imply any reference on cached pages of that
+inode, and even an `rcu_read_lock()` is not sufficient to ensure that
+a page will not disappear. So for these symlinks the pathname lookup
+code needs to ask the filesystem to provide a stable reference and,
+significantly, needs to release that reference when it is finished
+with it.
+
+Taking a reference to a cache page is often possible even in RCU-walk
+mode. It does require making changes to memory, which is best avoided,
+but that isn't necessarily a big cost and it is better than dropping
+out of RCU-walk mode completely. Even filesystems that allocate
+space to copy the symlink into can use `GFP_ATOMIC` to often successfully
+allocate memory without the need to drop out of RCU-walk. If a
+filesystem cannot successfully get a reference in RCU-walk mode, it
+must return `-ECHILD` and `unlazy_walk()` will be called to return to
+REF-walk mode in which the filesystem is allowed to sleep.
+
+The place for all this to happen is the `i_op->follow_link()` inode
+method. In the present mainline code this is never actually called in
+RCU-walk mode as the rewrite is not quite complete. It is likely that
+in a future release this method will be passed an `inode` pointer when
+called in RCU-walk mode so it both (1) knows to be careful, and (2) has the
+validated pointer. Much like the `i_op->permission()` method we
+looked at previously, `->follow_link()` would need to be careful that
+all the data structures it references are safe to be accessed while
+holding no counted reference, only the RCU lock. Though getting a
+reference with `->follow_link()` is not yet done in RCU-walk mode, the
+code is ready to release the reference when that does happen.
+
+This need to drop the reference to a symlink adds significant
+complexity. It requires a reference to the inode so that the
+`i_op->put_link()` inode operation can be called. In REF-walk, that
+reference is kept implicitly through a reference to the dentry, so
+keeping the `struct path` of the symlink is easiest. For RCU-walk,
+the pointer to the inode is kept separately. To allow switching from
+RCU-walk back to REF-walk in the middle of processing nested symlinks
+we also need the seq number for the dentry so we can confirm that
+switching back was safe.
+
+Finally, when providing a reference to a symlink, the filesystem also
+provides an opaque "cookie" that must be passed to `->put_link()` so that it
+knows what to free. This might be the allocated memory area, or a
+pointer to the `struct page` in the page cache, or something else
+completely. Only the filesystem knows what it is.
+
+In order for the reference to each symlink to be dropped when the walk completes,
+whether in RCU-walk or REF-walk, the symlink stack needs to contain,
+along with the path remnants:
+
+- the `struct path` to provide a reference to the inode in REF-walk
+- the `struct inode *` to provide a reference to the inode in RCU-walk
+- the `seq` to allow the path to be safely switched from RCU-walk to REF-walk
+- the `cookie` that tells `->put_link()` what to put.
+
+This means that each entry in the symlink stack needs to hold five
+pointers and an integer instead of just one pointer (the path
+remnant). On a 64-bit system, this is about 40 bytes per entry;
+with 40 entries it adds up to 1600 bytes total, which is less than
+half a page. So it might seem like a lot, but is by no means
+excessive.
+
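+Putting that together, each stack entry holds roughly the following.
+This is an illustrative layout matching the list above, not
+necessarily the exact structure used in `fs/namei.c`:
+
+    struct saved_link {
+            struct path link;       /* reference for REF-walk (mount + dentry) */
+            struct inode *inode;    /* validated inode pointer for RCU-walk */
+            const char *name;       /* the path remnant still to be walked */
+            void *cookie;           /* filesystem-private, passed to ->put_link() */
+            unsigned seq;           /* d_seq sample for RCU-to-REF switching */
+    };
+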
+Note that, in a given stack frame, the path remnant (`name`) is not
+part of the symlink that the other fields refer to. It is the remnant
+to be followed once that symlink has been fully parsed.
+
+Following the symlink
+---------------------
+
+The main loop in `link_path_walk()` iterates seamlessly over all
+components in the path and all of the non-final symlinks. As symlinks
+are processed, the `name` pointer is adjusted to point to a new
+symlink, or is restored from the stack, so that much of the loop
+doesn't need to notice. Getting this `name` variable on and off the
+stack is very straightforward; pushing and popping the references is
+a little more complex.
+
+When a symlink is found, `walk_component()` returns the value `1`
+(`0` is returned for any other sort of success, and a negative number
+is, as usual, an error indicator). This causes `get_link()` to be
+called; it then gets the link from the filesystem. Providing that
+operation is successful, the old path `name` is placed on the stack,
+and the new value is used as the `name` for a while. When the end of
+the path is found (i.e. `*name` is `'\0'`) the old `name` is restored
+off the stack and path walking continues.
+
+Pushing and popping the reference pointers (inode, cookie, etc.) is more
+complex in part because of the desire to handle tail recursion. When
+the last component of a symlink itself points to a symlink, we
+want to pop the symlink-just-completed off the stack before pushing
+the symlink-just-found to avoid leaving empty path remnants that would
+just get in the way.
+
+It is most convenient to push the new symlink references onto the
+stack in `walk_component()` immediately when the symlink is found;
+`walk_component()` is also the last piece of code that needs to look at the
+old symlink as it walks that last component. So it is quite
+convenient for `walk_component()` to release the old symlink and pop
+the references just before pushing the reference information for the
+new symlink. It is guided in this by two flags; `WALK_GET`, which
+gives it permission to follow a symlink if it finds one, and
+`WALK_PUT`, which tells it to release the current symlink after it has been
+followed. `WALK_PUT` is tested first, leading to a call to
+`put_link()`. `WALK_GET` is tested subsequently (by
+`should_follow_link()`) leading to a call to `pick_link()` which sets
+up the stack frame.
+
+### Symlinks with no final component ###
+
+A pair of special-case symlinks deserve a little further explanation.
+Both result in a new `struct path` (with mount and dentry) being set
+up in the `nameidata`, and result in `get_link()` returning `NULL`.
+
+The more obvious case is a symlink to "`/`". All symlinks starting
+with "`/`" are detected in `get_link()` which resets the `nameidata`
+to point to the effective filesystem root. If the symlink only
+contains "`/`" then there is nothing more to do, no components at all,
+so `NULL` is returned to indicate that the symlink can be released and
+the stack frame discarded.
+
+The other case involves things in `/proc` that look like symlinks but
+aren't really.
+
+> $ ls -l /proc/self/fd/1
+> lrwx------ 1 neilb neilb 64 Jun 13 10:19 /proc/self/fd/1 -> /dev/pts/4
+
+Every open file descriptor in any process is represented in `/proc` by
+something that looks like a symlink. It is really a reference to the
+target file, not just the name of it. When you `readlink` these
+objects you get a name that might refer to the same file - unless it
+has been unlinked or mounted over. When `walk_component()` follows
+one of these, the `->follow_link()` method in "procfs" doesn't return
+a string name, but instead calls `nd_jump_link()` which updates the
+`nameidata` in place to point to that target. `->follow_link()` then
+returns `NULL`. Again there is no final component and `get_link()`
+reports this by leaving the `last_type` field of `nameidata` as
+`LAST_BIND`.
+
+Following the symlink in the final component
+--------------------------------------------
+
+All this leads to `link_path_walk()` walking down every component, and
+following all symbolic links it finds, until it reaches the final
+component. This is just returned in the `last` field of `nameidata`.
+For some callers, this is all they need; they want to create that
+`last` name if it doesn't exist or give an error if it does. Other
+callers will want to follow a symlink if one is found, and possibly
+apply special handling to the last component of that symlink, rather
+than just the last component of the original file name. These callers
+potentially need to call `link_path_walk()` again and again on
+successive symlinks until one is found that doesn't point to another
+symlink.
+
+This case is handled by the relevant caller of `link_path_walk()`, such as
+`path_lookupat()` using a loop that calls `link_path_walk()`, and then
+handles the final component. If the final component is a symlink
+that needs to be followed, then `trailing_symlink()` is called to set
+things up properly and the loop repeats, calling `link_path_walk()`
+again. This could loop as many as 40 times if the last component of
+each symlink is another symlink.
+
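+That loop can be paraphrased roughly as follows (illustrative, with
+error handling trimmed):
+
+    while (!(err = link_path_walk(s, nd)) &&
+           (err = lookup_last(nd)) > 0) {
+            s = trailing_symlink(nd);       /* set up to walk the symlink body */
+            if (IS_ERR(s)) {
+                    err = PTR_ERR(s);
+                    break;
+            }
+    }
+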
+The various functions that examine the final component and possibly
+report that it is a symlink are `lookup_last()`, `mountpoint_last()`
+and `do_last()`, each of which use the same convention as
+`walk_component()` of returning `1` if a symlink was found that needs
+to be followed.
+
+Of these, `do_last()` is the most interesting as it is used for
+opening a file. Part of `do_last()` runs with `i_mutex` held and this
+part is in a separate function: `lookup_open()`.
+
+Explaining `do_last()` completely is beyond the scope of this article,
+but a few highlights should help those interested in exploring the
+code.
+
+1. Rather than just finding the target file, `do_last()` needs to open
+ it. If the file was found in the dcache, then `vfs_open()` is used for
+ this. If not, then `lookup_open()` will either call `atomic_open()` (if
+ the filesystem provides it) to combine the final lookup with the open, or
+ will perform the separate `lookup_real()` and `vfs_create()` steps
+   directly. In the latter case the actual "open" of this newly found or
+ created file will be performed by `vfs_open()`, just as if the name
+ were found in the dcache.
+
+2. `vfs_open()` can fail with `-EOPENSTALE` if the cached information
+ wasn't quite current enough. Rather than restarting the lookup from
+ the top with `LOOKUP_REVAL` set, `lookup_open()` is called instead,
+ giving the filesystem a chance to resolve small inconsistencies.
+ If that doesn't work, only then is the lookup restarted from the top.
+
+3. An open with O_CREAT **does** follow a symlink in the final component,
+ unlike other creation system calls (like `mkdir`). So the sequence:
+
+ > ln -s bar /tmp/foo
+ > echo hello > /tmp/foo
+
+ will create a file called `/tmp/bar`. This is not permitted if
+ `O_EXCL` is set but otherwise is handled for an O_CREAT open much
+ like for a non-creating open: `should_follow_link()` returns `1`, and
+ so does `do_last()` so that `trailing_symlink()` gets called and the
+ open process continues on the symlink that was found.
+
+Updating the access time
+------------------------
+
+We previously said of RCU-walk that it would "take no locks, increment
+no counts, leave no footprints." We have since seen that some
+"footprints" can be needed when handling symlinks as a counted
+reference (or even a memory allocation) may be needed. But these
+footprints are best kept to a minimum.
+
+One other place where walking down a symlink can involve leaving
+footprints in a way that doesn't affect directories is in updating access times.
+In Unix (and Linux) every filesystem object has a "last accessed
+time", or "`atime`". Passing through a directory to access a file
+within is not considered to be an access for the purposes of
+`atime`; only listing the contents of a directory can update its `atime`.
+Symlinks are different it seems. Both reading a symlink (with `readlink()`)
+and looking up a symlink on the way to some other destination can
+update the atime on that symlink.
+
+[clearest statement]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_08
+
+It is not clear why this is the case; POSIX has little to say on the
+subject. The [clearest statement] is that, if a particular implementation
+updates a timestamp in a place not specified by POSIX, this must be
+documented "except that any changes caused by pathname resolution need
+not be documented". This seems to imply that POSIX doesn't really
+care about access-time updates during pathname lookup.
+
+[Linux 1.3.87]: https://git.kernel.org/cgit/linux/kernel/git/history/history.git/diff/fs/ext2/symlink.c?id=f806c6db77b8eaa6e00dcfb6b567706feae8dbb8
+
+An examination of history shows that prior to [Linux 1.3.87], the ext2
+filesystem, at least, didn't update atime when following a link.
+Unfortunately we have no record of why that behavior was changed.
+
+In any case, access time must now be updated and that operation can be
+quite complex. Trying to stay in RCU-walk while doing it is best
+avoided. Fortunately it is often permitted to skip the `atime`
+update. Because `atime` updates cause performance problems in various
+areas, Linux supports the `relatime` mount option, which generally
+limits the updates of `atime` to once per day on files that aren't
+being changed (and symlinks never change once created). Even without
+`relatime`, many filesystems record `atime` with a one-second
+granularity, so only one update per second is required.
+
+It is easy to test if an `atime` update is needed while in RCU-walk
+mode and, if it isn't, the update can be skipped and RCU-walk mode
+continues. Only when an `atime` update is actually required does the
+path walk drop down to REF-walk. All of this is handled in the
+`get_link()` function.
+
+A few flags
+-----------
+
+A suitable way to wrap up this tour of pathname walking is to list
+the various flags that can be stored in the `nameidata` to guide the
+lookup process. Many of these are only meaningful on the final
+component, others reflect the current state of the pathname lookup.
+And then there is `LOOKUP_EMPTY`, which doesn't fit conceptually with
+the others. If this is not set, an empty pathname causes an error
+very early on. If it is set, empty pathnames are not considered to be
+an error.
+
+### Global state flags ###
+
+We have already met two global state flags: `LOOKUP_RCU` and
+`LOOKUP_REVAL`. These select between one of three overall approaches
+to lookup: RCU-walk, REF-walk, and REF-walk with forced revalidation.
+
+`LOOKUP_PARENT` indicates that the final component hasn't been reached
+yet. This is primarily used to tell the audit subsystem the full
+context of a particular access being audited.
+
+`LOOKUP_ROOT` indicates that the `root` field in the `nameidata` was
+provided by the caller, so it shouldn't be released when it is no
+longer needed.
+
+`LOOKUP_JUMPED` means that the current dentry was chosen not because
+it had the right name but for some other reason. This happens when
+following "`..`", following a symlink to `/`, crossing a mount point
+or accessing a "`/proc/$PID/fd/$FD`" symlink. In this case the
+filesystem has not been asked to revalidate the name (with
+`d_revalidate()`). In such cases the inode may still need to be
+revalidated, so `d_op->d_weak_revalidate()` is called if
+`LOOKUP_JUMPED` is set when the lookup completes - which may be at the
+final component or, when creating, unlinking, or renaming, at the penultimate component.
+
+### Final-component flags ###
+
+Some of these flags are only set when the final component is being
+considered. Others are only checked for when considering that final
+component.
+
+`LOOKUP_AUTOMOUNT` ensures that, if the final component is an automount
+point, then the mount is triggered. Some operations would trigger it
+anyway, but operations like `stat()` deliberately don't. `statfs()`
+needs to trigger the mount but otherwise behaves a lot like `stat()`, so
+it sets `LOOKUP_AUTOMOUNT`, as do "`quotactl()`" and the handling of
+"`mount --bind`".
+
+`LOOKUP_FOLLOW` has a similar function to `LOOKUP_AUTOMOUNT` but for
+symlinks. Some system calls set or clear it implicitly, while
+others have API flags such as `AT_SYMLINK_FOLLOW` and
+`UMOUNT_NOFOLLOW` to control it. Its effect is similar to
+`WALK_GET` that we already met, but it is used in a different way.
+
+`LOOKUP_DIRECTORY` insists that the final component is a directory.
+Various callers set this and it is also set when the final component
+is found to be followed by a slash.
+
+Finally `LOOKUP_OPEN`, `LOOKUP_CREATE`, `LOOKUP_EXCL`, and
+`LOOKUP_RENAME_TARGET` are not used directly by the VFS but are made
+available to the filesystem and particularly the `->d_revalidate()`
+method. A filesystem can choose not to bother revalidating too hard
+if it knows that it will be asked to open or create the file soon.
+These flags were previously useful for `->lookup()` too but with the
+introduction of `->atomic_open()` they are less relevant there.
+
+End of the road
+---------------
+
+Despite its complexity, all this pathname lookup code appears to be
+in good shape - various parts are certainly easier to understand now
+than even a couple of releases ago. But that doesn't mean it is
+"finished". As already mentioned, RCU-walk currently only follows
+symlinks that are stored in the inode so, while it handles many ext4
+symlinks, it doesn't help with NFS, XFS, or Btrfs. That support
+is not likely to be long delayed.
diff --git a/Documentation/filesystems/path-lookup.txt b/Documentation/filesystems/path-lookup.txt
index 3571667c7105..9b8930f589d9 100644
--- a/Documentation/filesystems/path-lookup.txt
+++ b/Documentation/filesystems/path-lookup.txt
@@ -379,4 +379,4 @@ Papers and other documentation on dcache locking
2. http://lse.sourceforge.net/locking/dcache/dcache.html
-
+3. path-lookup.md in this directory.
diff --git a/Documentation/filesystems/sysfs-tagging.txt b/Documentation/filesystems/sysfs-tagging.txt
index eb843e49c5a3..c7c8e6438958 100644
--- a/Documentation/filesystems/sysfs-tagging.txt
+++ b/Documentation/filesystems/sysfs-tagging.txt
@@ -17,13 +17,13 @@ the sysfs directory entries we ensure that we don't have conflicts
in the directories and applications only see a limited set of
the network devices.
-Each sysfs directory entry may be tagged with zero or one
-namespaces. A sysfs_dirent is augmented with a void *s_ns. If a
-directory entry is tagged, then sysfs_dirent->s_flags will have a
-flag between KOBJ_NS_TYPE_NONE and KOBJ_NS_TYPES, and s_ns will
-point to the namespace to which it belongs.
+Each sysfs directory entry may be tagged with a namespace via the
+void *ns member of its kernfs_node. If a directory entry is tagged,
+then kernfs_node->flags will have a flag between KOBJ_NS_TYPE_NONE
+and KOBJ_NS_TYPES, and ns will point to the namespace to which it
+belongs.
-Each sysfs superblock's sysfs_super_info contains an array void
+Each sysfs superblock's kernfs_super_info contains an array void
*ns[KOBJ_NS_TYPES]. When a task in a tagging namespace
kobj_nstype first mounts sysfs, a new superblock is created. It
will be differentiated from other sysfs mounts by having its
@@ -31,7 +31,7 @@ s_fs_info->ns[kobj_nstype] set to the new namespace. Note that
through bind mounting and mounts propagation, a task can easily view
the contents of other namespaces' sysfs mounts. Therefore, when a
namespace exits, it will call kobj_ns_exit() to invalidate any
-sysfs_dirent->s_ns pointers pointing to it.
+kernfs_node->ns pointers pointing to it.
Users of this interface:
- define a type in the kobj_ns_type enumeration.
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 9494afb9476a..24da7b32c489 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -40,7 +40,7 @@ ancestors of object hierarchies; i.e. the subsystems the objects
belong to.
Sysfs internally stores a pointer to the kobject that implements a
-directory in the sysfs_dirent object associated with the directory. In
+directory in the kernfs_node object associated with the directory. In
the past this kobject pointer has been used by sysfs to do reference
counting directly on the kobject whenever the file is opened or closed.
With the current sysfs implementation the kobject reference count is
@@ -191,9 +191,10 @@ implementations:
be called again, rearmed, to fill the buffer.
- On write(2), sysfs expects the entire buffer to be passed during the
- first write. Sysfs then passes the entire buffer to the store()
- method.
-
+ first write. Sysfs then passes the entire buffer to the store() method.
+ A terminating null is added after the data on stores. This makes
+ functions like sysfs_streq() safe to use.
+
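+A hedged sketch (the device helpers and the "mode" attribute are invented;
+the usual <linux/device.h> and <linux/string.h> declarations are assumed) of
+a store() method relying on that terminating null:
+
+  static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+  {
+          /* buf is null-terminated by sysfs, so sysfs_streq() is safe */
+          if (sysfs_streq(buf, "fast"))
+                  example_enable_fast_mode(dev);          /* hypothetical */
+          else if (sysfs_streq(buf, "slow"))
+                  example_enable_slow_mode(dev);          /* hypothetical */
+          else
+                  return -EINVAL;
+          return count;
+  }
+
+sysfs_streq() also tolerates the trailing newline that echo(1) usually
+appends, which is why it is preferred over strcmp() here.
+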
When writing sysfs files, userspace processes should first read the
entire file, modify the values it wishes to change, then write the
entire buffer back.
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index f59c43b6411b..3092178628c4 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -21,8 +21,8 @@ exact way to do it depends on the GPIO controller providing the GPIOs, see the
device tree bindings for your controller.
GPIO mappings are defined in the consumer device's node, in a property named
-either <function>-gpios or <function>-gpio, where <function> is the function
-the driver will request through gpiod_get(). For example:
+<function>-gpios, where <function> is the function the driver will request
+through gpiod_get(). For example:
foo_device {
compatible = "acme,foo";
@@ -31,9 +31,13 @@ the driver will request through gpiod_get(). For example:
<&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
<&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
- power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
+ power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
};
+Properties named <function>-gpio are also considered valid and old bindings use
+them, but they are only supported for compatibility reasons and should not be
+used in new bindings since this form has been deprecated.
+
This property will make GPIOs 15, 16 and 17 available to the driver under the
"led" function, and GPIO 1 as the "power" GPIO:
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
index 0700b55637f5..aeab01aa4d00 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.txt
@@ -20,14 +20,14 @@ userspace GPIO can be used to determine system configuration data that
standard kernels won't know about. And for some tasks, simple userspace
GPIO drivers could be all that the system really needs.
-DO NOT ABUSE SYFS TO CONTROL HARDWARE THAT HAS PROPER KERNEL DRIVERS.
+DO NOT ABUSE SYSFS TO CONTROL HARDWARE THAT HAS PROPER KERNEL DRIVERS.
PLEASE READ THE DOCUMENT NAMED "drivers-on-gpio.txt" IN THIS DOCUMENTATION
DIRECTORY TO AVOID REINVENTING KERNEL WHEELS IN USERSPACE. I MEAN IT.
REALLY.
Paths in Sysfs
--------------
-There are three kinds of entry in /sys/class/gpio:
+There are three kinds of entries in /sys/class/gpio:
- Control interfaces used to get userspace control over GPIOs;
@@ -106,7 +106,7 @@ read-only attributes:
"label" ... provided for diagnostics (not always unique)
- "ngpio" ... how many GPIOs this manges (N to N + ngpio - 1)
+ "ngpio" ... how many GPIOs this manages (N to N + ngpio - 1)
Board documentation should in most cases cover what GPIOs are used for
what purposes. However, those numbers are not always stable; GPIOs on
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index eda1eb1451a0..08913361e054 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -696,18 +696,18 @@
Memory related patches, HOWTOs, links, mm developers... Don't miss
it if you are interested in memory management development!
- * Name: "Kernel Newbies IRC Channel"
+ * Name: "Kernel Newbies IRC Channel and Website"
URL: http://www.kernelnewbies.org
Keywords: IRC, newbies, channel, asking doubts.
- Description: #kernelnewbies on irc.openprojects.net. From the web
- page: "#kernelnewbies is an IRC network dedicated to the 'newbie'
+ Description: #kernelnewbies on irc.oftc.net.
+              #kernelnewbies is an IRC channel dedicated to the 'newbie'
kernel hacker. The audience mostly consists of people who are
learning about the kernel, working on kernel projects or
professional kernel hackers that want to help less seasoned kernel
- people. [...] #kernelnewbies is on the Open Projects IRC Network,
- try irc.openprojects.net or irc.<country>.openprojects.net as your
- server and then /join #kernelnewbies". It also hosts articles,
- documents, FAQs...
+ people.
+ #kernelnewbies is on the OFTC IRC Network.
+ Try irc.oftc.net as your server and then /join #kernelnewbies.
+ The kernelnewbies website also hosts articles, documents, FAQs...
* Name: "linux-kernel mailing list archives and search engines"
URL: http://vger.kernel.org/vger-lists.html
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0231f4508abe..84c0214b64a7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -790,8 +790,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
is passed, kernel could allocate physical memory region
above 4G, that cause second kernel crash on system
that require some amount of low memory, e.g. swiotlb
- requires at least 64M+32K low memory. Kernel would
- try to allocate 72M below 4G automatically.
+ requires at least 64M+32K low memory; enough extra
+ low memory is also needed to make sure DMA buffers for
+ 32-bit devices won't run out. The kernel would try to
+ allocate at least 256M below 4G automatically.
This one lets the user specify their own low range under 4G
for the second kernel instead.
0: to disable low allocation.
@@ -972,6 +974,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
earlycon= [KNL] Output early console device and options.
+ When used with no options, the early console is
+ determined by the stdout-path property in the device
+ tree's chosen node.
+
cdns,<addr>
Start an early, polled-mode console on a cadence serial
port at the specified address. The cadence serial port
@@ -1584,6 +1590,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nosid disable Source ID checking
no_x2apic_optout
BIOS x2APIC opt-out request will be ignored
+ nopost disable Interrupt Posting
iomem= Disable strict checking of access to MMIO memory
strict regions from userspace.
@@ -2345,11 +2352,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nmi_watchdog= [KNL,BUGS=X86] Debugging features for SMP kernels
Format: [panic,][nopanic,][num]
Valid num: 0 or 1
- 0 - turn nmi_watchdog off
- 1 - turn nmi_watchdog on
+ 0 - turn hardlockup detector in nmi_watchdog off
+ 1 - turn hardlockup detector in nmi_watchdog on
When panic is specified, panic when an NMI watchdog
timeout occurs (or 'nopanic' to override the opposite
- default).
+ default). To disable both hard and soft lockup detectors,
+ please see 'nowatchdog'.
This is useful when you use a panic=... timeout and
need the box quickly up again.
diff --git a/Documentation/kselftest.txt b/Documentation/kselftest.txt
index a87d840bacfe..9bbbcdc598d9 100644
--- a/Documentation/kselftest.txt
+++ b/Documentation/kselftest.txt
@@ -54,6 +54,22 @@ To run the hotplug tests:
- note that some tests will require root privileges.
+Install selftests
+=================
+
+You can use the kselftest_install.sh tool to install selftests in the default
+location, which is tools/testing/selftests/kselftest, or in a user-specified
+location.
+
+To install selftests in the default location:
+ $ cd tools/testing/selftests
+ $ ./kselftest_install.sh
+
+To install selftests in a user-specified location:
+ $ cd tools/testing/selftests
+ $ ./kselftest_install.sh install_dir
+
+
Contributing new tests
======================
diff --git a/Documentation/misc-devices/apds990x.txt b/Documentation/misc-devices/apds990x.txt
index d5408cade32f..454d95d623b3 100644
--- a/Documentation/misc-devices/apds990x.txt
+++ b/Documentation/misc-devices/apds990x.txt
@@ -30,7 +30,7 @@ lead to false interrupt, but that doesn't harm.
ALS contains 4 different gain steps. Driver automatically
selects suitable gain step. After each measurement, reliability of the results
-is estimated and new measurement is trigged if necessary.
+is estimated and a new measurement is triggered if necessary.
Platform data can provide tuned values to the conversion formulas if
values are known. Otherwise plain sensor default values are used.
diff --git a/Documentation/misc-devices/isl29003 b/Documentation/misc-devices/isl29003
index c4ff5f38e010..80b952fd32ff 100644
--- a/Documentation/misc-devices/isl29003
+++ b/Documentation/misc-devices/isl29003
@@ -29,7 +29,7 @@ Detection
The ISL29003 does not have an ID register which could be used to identify
it, so the detection routine will just try to read from the configured I2C
-addess and consider the device to be present as soon as it ACKs the
+address and consider the device to be present as soon as it ACKs the
transfer.
diff --git a/Documentation/misc-devices/max6875 b/Documentation/misc-devices/max6875
index 1e89ee3ccc1b..2f2bd0b17b5d 100644
--- a/Documentation/misc-devices/max6875
+++ b/Documentation/misc-devices/max6875
@@ -22,7 +22,7 @@ At reset, the MAX6875 reads the configuration EEPROM into its configuration
registers. The chip then begins to operate according to the values in the
registers.
-The Maxim MAX6874 is a similar, mostly compatible device, with more intputs
+The Maxim MAX6874 is a similar, mostly compatible device, with more inputs
and outputs:
vin gpi vout
MAX6874 6 4 8
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index fd1a1aad49a9..4636b94518da 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -1018,25 +1018,34 @@ solution for a couple of reasons:
$ ip link set can0 type can help
Usage: ip link set DEVICE type can
- [ bitrate BITRATE [ sample-point SAMPLE-POINT] ] |
- [ tq TQ prop-seg PROP_SEG phase-seg1 PHASE-SEG1
- phase-seg2 PHASE-SEG2 [ sjw SJW ] ]
-
- [ loopback { on | off } ]
- [ listen-only { on | off } ]
- [ triple-sampling { on | off } ]
-
- [ restart-ms TIME-MS ]
- [ restart ]
-
- Where: BITRATE := { 1..1000000 }
- SAMPLE-POINT := { 0.000..0.999 }
- TQ := { NUMBER }
- PROP-SEG := { 1..8 }
- PHASE-SEG1 := { 1..8 }
- PHASE-SEG2 := { 1..8 }
- SJW := { 1..4 }
- RESTART-MS := { 0 | NUMBER }
+ [ bitrate BITRATE [ sample-point SAMPLE-POINT] ] |
+ [ tq TQ prop-seg PROP_SEG phase-seg1 PHASE-SEG1
+ phase-seg2 PHASE-SEG2 [ sjw SJW ] ]
+
+ [ dbitrate BITRATE [ dsample-point SAMPLE-POINT] ] |
+ [ dtq TQ dprop-seg PROP_SEG dphase-seg1 PHASE-SEG1
+ dphase-seg2 PHASE-SEG2 [ dsjw SJW ] ]
+
+ [ loopback { on | off } ]
+ [ listen-only { on | off } ]
+ [ triple-sampling { on | off } ]
+ [ one-shot { on | off } ]
+ [ berr-reporting { on | off } ]
+ [ fd { on | off } ]
+ [ fd-non-iso { on | off } ]
+ [ presume-ack { on | off } ]
+
+ [ restart-ms TIME-MS ]
+ [ restart ]
+
+ Where: BITRATE := { 1..1000000 }
+ SAMPLE-POINT := { 0.000..0.999 }
+ TQ := { NUMBER }
+ PROP-SEG := { 1..8 }
+ PHASE-SEG1 := { 1..8 }
+ PHASE-SEG2 := { 1..8 }
+ SJW := { 1..4 }
+ RESTART-MS := { 0 | NUMBER }
- Display CAN device details and statistics:
@@ -1178,7 +1187,55 @@ solution for a couple of reasons:
The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
N.B. CAN FD capable devices can also handle and send legacy CAN frames.
- FIXME: Add details about the CAN FD controller configuration when available.
+ When configuring CAN FD capable CAN controllers an additional 'data' bitrate
+ has to be set. This bitrate for the data phase of the CAN FD frame has to be
+ at least the bitrate which was configured for the arbitration phase. This
+ second bitrate is specified analogously to the first bitrate, but the
+ bitrate setting keywords for the 'data' bitrate start with 'd', e.g. dbitrate,
+ dsample-point, dsjw or dtq and similar settings. When a data bitrate is set
+ within the configuration process the controller option "fd on" can be
+ specified to enable the CAN FD mode in the CAN controller. This controller
+ option also switches the device MTU to 72 (CANFD_MTU).
+
+ The first CAN FD specification, presented as a whitepaper at the International
+ CAN Conference 2012, needed to be improved for data integrity reasons.
+ Therefore two CAN FD implementations have to be distinguished today:
+
+ - ISO compliant: The ISO 11898-1:2015 CAN FD implementation (default)
+ - non-ISO compliant: The CAN FD implementation following the 2012 whitepaper
+
+ Finally there are three types of CAN FD controllers:
+
+ 1. ISO compliant (fixed)
+ 2. non-ISO compliant (fixed, like the M_CAN IP core v3.0.1 in m_can.c)
+ 3. ISO/non-ISO CAN FD controllers (switchable, like the PEAK PCAN-USB FD)
+
+ The current ISO/non-ISO mode is announced by the CAN controller driver via
+ netlink and displayed by the 'ip' tool (controller option FD-NON-ISO).
+ The ISO/non-ISO mode can be altered by setting 'fd-non-iso {on|off}' for
+ switchable CAN FD controllers only.
+
+ Example configuring 500 kbit/s arbitration bitrate and 4 Mbit/s data bitrate:
+
+ $ ip link set can0 up type can bitrate 500000 sample-point 0.75 \
+ dbitrate 4000000 dsample-point 0.8 fd on
+ $ ip -details link show can0
+ 5: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UNKNOWN \
+ mode DEFAULT group default qlen 10
+ link/can promiscuity 0
+ can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+ bitrate 500000 sample-point 0.750
+ tq 50 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 1
+ pcan_usb_pro_fd: tseg1 1..64 tseg2 1..16 sjw 1..16 brp 1..1024 \
+ brp-inc 1
+ dbitrate 4000000 dsample-point 0.800
+ dtq 12 dprop-seg 7 dphase-seg1 8 dphase-seg2 4 dsjw 1
+ pcan_usb_pro_fd: dtseg1 1..16 dtseg2 1..8 dsjw 1..4 dbrp 1..1024 \
+ dbrp-inc 1
+ clock 80000000
+
+ Example when 'fd-non-iso on' is added on this switchable CAN FD adapter:
+ can <FD,FD-NON-ISO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
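+
+  As a hedged userspace sketch (the interface name "can0", the CAN ID and the
+  payload are placeholders), once the interface is up in CAN FD mode a CAN_RAW
+  socket can opt in to CAN FD frames and write a struct canfd_frame of
+  CANFD_MTU (72) bytes:
+
+    #include <string.h>
+    #include <unistd.h>
+    #include <net/if.h>
+    #include <sys/socket.h>
+    #include <linux/can.h>
+    #include <linux/can/raw.h>
+
+    int send_one_canfd_frame(void)
+    {
+            struct sockaddr_can addr = { .can_family = AF_CAN };
+            struct canfd_frame cfd = { .can_id = 0x123, .len = 16 };
+            int enable = 1;
+            int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+
+            if (s < 0)
+                    return -1;
+
+            /* allow read()/write() of struct canfd_frame on this socket */
+            setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
+                       &enable, sizeof(enable));
+
+            addr.can_ifindex = if_nametoindex("can0");
+            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+                    close(s);
+                    return -1;
+            }
+
+            memset(cfd.data, 0xaa, cfd.len);        /* example payload */
+            if (write(s, &cfd, CANFD_MTU) != CANFD_MTU) {
+                    close(s);
+                    return -1;
+            }
+            close(s);
+            return 0;
+    }
+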
6.7 Supported CAN hardware
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 39873ef41bf9..b9d9cc57be18 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -203,7 +203,7 @@ functions with the user provided augmentation callback when inserting
and erasing nodes.
C files implementing augmented rbtree manipulation must include
-<linux/rbtree_augmented.h> instead of <linus/rbtree.h>. Note that
+<linux/rbtree_augmented.h> instead of <linux/rbtree.h>. Note that
linux/rbtree_augmented.h exposes some rbtree implementations details
you are not expected to rely on; please stick to the documented APIs
there and do not include <linux/rbtree_augmented.h> from header files
diff --git a/Documentation/security/Smack.txt b/Documentation/security/Smack.txt
index 5e6d07fbed07..945cc633d883 100644
--- a/Documentation/security/Smack.txt
+++ b/Documentation/security/Smack.txt
@@ -255,6 +255,16 @@ unconfined
the access permitted if it wouldn't be otherwise. Note that this
is dangerous and can ruin the proper labeling of your system.
It should never be used in production.
+relabel-self
+ This interface contains a list of labels to which the process can
+	transition by writing to /proc/self/attr/current.
+ Normally a process can change its own label to any legal value, but only
+ if it has CAP_MAC_ADMIN. This interface allows a process without
+	CAP_MAC_ADMIN to relabel itself to one of the labels on this predefined list.
+ A process without CAP_MAC_ADMIN can change its label only once. When it
+ does, this list will be cleared.
+ The values are set by writing the desired labels, separated
+ by spaces, to the file or cleared by writing "-" to the file.
If you are using the smackload utility
you can add access rules in /etc/smack/accesses. They take the form:
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index c9e7f4f223a5..8c183873b2b7 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -1049,12 +1049,12 @@ search a specific keyring, so using keyrings in this way is of limited utility.
NOTES ON ACCESSING PAYLOAD CONTENTS
===================================
-The simplest payload is just a number in key->payload.value. In this case,
-there's no need to indulge in RCU or locking when accessing the payload.
+The simplest payload is just data stored in key->payload directly. In this
+case, there's no need to indulge in RCU or locking when accessing the payload.
-More complex payload contents must be allocated and a pointer to them set in
-key->payload.data. One of the following ways must be selected to access the
-data:
+More complex payload contents must be allocated and pointers to them set in the
+key->payload.data[] array. One of the following ways must be selected to
+access the data:
(1) Unmodifiable key type.
@@ -1092,6 +1092,13 @@ data:
the payload. key->datalen cannot be relied upon to be consistent with the
payload just dereferenced if the key's semaphore is not held.
+ Note that key->payload.data[0] has a shadow that is marked for __rcu
+ usage. This is called key->payload.rcu_data0. The following accessors
+ wrap the RCU calls to this element:
+
+ rcu_assign_keypointer(struct key *key, void *data);
+ void *rcu_dereference_key(struct key *key);
+
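+  As a hedged sketch (the payload structure, its rcu_head member and the
+  helpers are assumptions, loosely following simple existing key types), an
+  ->update() method might publish a new payload with the accessor and free
+  the old one after a grace period:
+
+	struct example_payload {
+		struct rcu_head	rcu;
+		unsigned short	datalen;
+		char		data[];
+	};
+
+	static int example_update(struct key *key,
+				  struct key_preparsed_payload *prep)
+	{
+		struct example_payload *new = prep->payload.data[0];
+		struct example_payload *old = key->payload.data[0];
+
+		/* key->sem is held for writing by the caller of ->update() */
+		rcu_assign_keypointer(key, new);
+		prep->payload.data[0] = NULL;	/* ownership moved to the key */
+
+		if (old)
+			kfree_rcu(old, rcu);
+		return 0;
+	}
+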
===================
DEFINING A KEY TYPE
@@ -1143,8 +1150,7 @@ The structure has a number of fields, some of which are mandatory:
struct key_preparsed_payload {
char *description;
- void *type_data[2];
- void *payload;
+ union key_payload payload;
const void *data;
size_t datalen;
size_t quotalen;
@@ -1160,10 +1166,9 @@ The structure has a number of fields, some of which are mandatory:
attached as a string to the description field. This will be used for the
key description if the caller of add_key() passes NULL or "".
- The method can attach anything it likes to type_data[] and payload. These
- are merely passed along to the instantiate() or update() operations. If
- set, the expiry time will be applied to the key if it is instantiated from
- this data.
+ The method can attach anything it likes to payload. This is merely passed
+ along to the instantiate() or update() operations. If set, the expiry
+ time will be applied to the key if it is instantiated from this data.
The method should return 0 if successful or a negative error code
otherwise.
@@ -1172,11 +1177,10 @@ The structure has a number of fields, some of which are mandatory:
(*) void (*free_preparse)(struct key_preparsed_payload *prep);
This method is only required if the preparse() method is provided,
- otherwise it is unused. It cleans up anything attached to the
- description, type_data and payload fields of the key_preparsed_payload
- struct as filled in by the preparse() method. It will always be called
- after preparse() returns successfully, even if instantiate() or update()
- succeed.
+ otherwise it is unused. It cleans up anything attached to the description
+ and payload fields of the key_preparsed_payload struct as filled in by the
+ preparse() method. It will always be called after preparse() returns
+ successfully, even if instantiate() or update() succeed.
(*) int (*instantiate)(struct key *key, struct key_preparsed_payload *prep);
@@ -1197,6 +1201,11 @@ The structure has a number of fields, some of which are mandatory:
It is safe to sleep in this method.
+ generic_key_instantiate() is provided to simply copy the data from
+ prep->payload.data[] to key->payload.data[], with RCU-safe assignment on
+ the first element. It will then clear prep->payload.data[] so that the
+ free_preparse method doesn't release the data.
+
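+  A hedged sketch of that arrangement (the type name, the size limit and the
+  use of <linux/slab.h> helpers are assumptions): preparse() stashes a copy of
+  the data, generic_key_instantiate() moves it into the key, and
+  free_preparse() only frees whatever is still left in prep:
+
+	static int example_preparse(struct key_preparsed_payload *prep)
+	{
+		void *copy;
+
+		if (prep->datalen == 0 || prep->datalen > 4096)
+			return -EINVAL;
+
+		copy = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
+		if (!copy)
+			return -ENOMEM;
+
+		prep->payload.data[0] = copy;
+		prep->quotalen = prep->datalen;
+		return 0;
+	}
+
+	static void example_free_preparse(struct key_preparsed_payload *prep)
+	{
+		kfree(prep->payload.data[0]);
+	}
+
+	static struct key_type key_type_example = {
+		.name		= "example",
+		.preparse	= example_preparse,
+		.free_preparse	= example_free_preparse,
+		.instantiate	= generic_key_instantiate,
+	};
+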
(*) int (*update)(struct key *key, const void *data, size_t datalen);
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index f4b395bdc090..282102014bb9 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -193,3 +193,4 @@
192 -> AverMedia AverTV Satellite Hybrid+FM A706 [1461:2055]
193 -> WIS Voyager or compatible [1905:7007]
194 -> AverMedia AverTV/505 [1461:a10a]
+195 -> Leadtek Winfast TV2100 FM [107d:6f3a]
diff --git a/Documentation/video4linux/v4l2-pci-skeleton.c b/Documentation/video4linux/v4l2-pci-skeleton.c
index 9c80c090e92d..95ae82860092 100644
--- a/Documentation/video4linux/v4l2-pci-skeleton.c
+++ b/Documentation/video4linux/v4l2-pci-skeleton.c
@@ -37,6 +37,7 @@
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
MODULE_DESCRIPTION("V4L2 PCI Skeleton Driver");
@@ -162,10 +163,11 @@ static irqreturn_t skeleton_irq(int irq, void *dev_id)
* minimum number: many DMA engines need a minimum of 2 buffers in the
* queue and you need to have another available for userspace processing.
*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct skeleton *skel = vb2_get_drv_priv(vq);
skel->field = skel->format.field;
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index d9ecceea5a02..092ee9fbaf2b 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -401,10 +401,9 @@ Capability: basic
Architectures: x86, ppc, mips
Type: vcpu ioctl
Parameters: struct kvm_interrupt (in)
-Returns: 0 on success, -1 on error
+Returns: 0 on success, negative on failure.
-Queues a hardware interrupt vector to be injected. This is only
-useful if in-kernel local APIC or equivalent is not used.
+Queues a hardware interrupt vector to be injected.
/* for KVM_INTERRUPT */
struct kvm_interrupt {
@@ -414,7 +413,14 @@ struct kvm_interrupt {
X86:
-Note 'irq' is an interrupt vector, not an interrupt pin or line.
+Returns: 0 on success,
+ -EEXIST if an interrupt is already enqueued
+ -EINVAL if the irq number is invalid
+ -ENXIO if the PIC is in the kernel
+ -EFAULT if the pointer is invalid
+
+Note 'irq' is an interrupt vector, not an interrupt pin or line. This
+ioctl is useful if the in-kernel PIC is not used.
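+
+As a hedged userspace sketch (assuming the usual <linux/kvm.h> definitions and
+an open vcpu fd; the vector value is a placeholder), injection with a
+userspace irqchip looks like:
+
+	struct kvm_interrupt irq = { .irq = 0x20 };	/* assumed vector */
+
+	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
+		perror("KVM_INTERRUPT");
+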
PPC:
@@ -1598,7 +1604,7 @@ provided event instead of triggering an exit.
struct kvm_ioeventfd {
__u64 datamatch;
__u64 addr; /* legal pio/mmio address */
- __u32 len; /* 1, 2, 4, or 8 bytes */
+ __u32 len; /* 0, 1, 2, 4, or 8 bytes */
__s32 fd;
__u32 flags;
__u8 pad[36];
@@ -1621,6 +1627,10 @@ to the registered address is equal to datamatch in struct kvm_ioeventfd.
For virtio-ccw devices, addr contains the subchannel id and datamatch the
virtqueue index.
+With KVM_CAP_IOEVENTFD_ANY_LENGTH, a zero-length ioeventfd is allowed, and
+the kernel will ignore the length of the guest write and may get a faster vmexit.
+The speedup may only apply to specific architectures, but the ioeventfd will
+work anyway.
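+
+A hedged userspace sketch (the guest address and the eventfd are placeholders)
+of registering such a zero-length ioeventfd on a VM fd:
+
+	struct kvm_ioeventfd io = {
+		.addr	= 0xfe000000,	/* assumed guest MMIO address */
+		.len	= 0,		/* match writes of any length */
+		.fd	= efd,		/* an eventfd created beforehand */
+	};
+
+	if (ioctl(vm_fd, KVM_IOEVENTFD, &io) < 0)
+		perror("KVM_IOEVENTFD");
+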
4.60 KVM_DIRTY_TLB
@@ -1774,7 +1784,7 @@ has been called, this interface is completely emulated within the kernel.
To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
following algorithm:
- - pause the vpcu
+ - pause the vcpu
- read the local APIC's state (KVM_GET_LAPIC)
- check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
- if so, issue KVM_NMI
@@ -2798,7 +2808,7 @@ Returns: = 0 on success,
< 0 on generic error (e.g. -EFAULT or -ENOMEM),
> 0 if an exception occurred while walking the page tables
-Read or write data from/to the logical (virtual) memory of a VPCU.
+Read or write data from/to the logical (virtual) memory of a VCPU.
Parameters are specified via the following structure:
@@ -3309,6 +3319,18 @@ Valid values for 'type' are:
to ignore the request, or to gather VM memory core dump and/or
reset/shutdown of the VM.
+ /* KVM_EXIT_IOAPIC_EOI */
+ struct {
+ __u8 vector;
+ } eoi;
+
+Indicates that the VCPU's in-kernel local APIC received an EOI for a
+level-triggered IOAPIC interrupt. This exit only triggers when the
+IOAPIC is implemented in userspace (i.e. KVM_CAP_SPLIT_IRQCHIP is enabled);
+the userspace IOAPIC should process the EOI and retrigger the interrupt if
+it is still asserted. Vector is the LAPIC interrupt vector for which the
+EOI was received.
+
/* Fix the size of the union. */
char padding[256];
};
@@ -3627,6 +3649,26 @@ struct {
KVM handlers should exit to userspace with rc = -EREMOTE.
+7.5 KVM_CAP_SPLIT_IRQCHIP
+
+Architectures: x86
+Parameters: args[0] - number of routes reserved for userspace IOAPICs
+Returns: 0 on success, -1 on error
+
+Create a local apic for each processor in the kernel. This can be used
+instead of KVM_CREATE_IRQCHIP if the userspace VMM wishes to emulate the
+IOAPIC and PIC (and also the PIT, even though this has to be enabled
+separately).
+
+This capability also enables in-kernel routing of interrupt requests;
+when KVM_CAP_SPLIT_IRQCHIP is enabled, only routes of KVM_IRQ_ROUTING_MSI type are
+used in the IRQ routing table. The first args[0] MSI routes are reserved
+for the IOAPIC pins. Whenever the LAPIC receives an EOI for these routes,
+a KVM_EXIT_IOAPIC_EOI vmexit will be reported to userspace.
+
+Fails if VCPU has already been created, or if the irqchip is already in the
+kernel (i.e. KVM_CREATE_IRQCHIP has already been called).
+
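+A hedged userspace sketch (24 reserved routes is an arbitrary choice) of
+enabling the capability on the VM fd before any VCPU is created:
+
+	struct kvm_enable_cap cap = {
+		.cap	= KVM_CAP_SPLIT_IRQCHIP,
+		.args	= { 24 },	/* routes reserved for IOAPIC pins */
+	};
+
+	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+		perror("KVM_ENABLE_CAP");
+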
8. Other capabilities.
----------------------
diff --git a/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt b/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt
new file mode 100644
index 000000000000..38bca2835278
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt
@@ -0,0 +1,187 @@
+KVM/ARM VGIC Forwarded Physical Interrupts
+==========================================
+
+The KVM/ARM code implements software support for the ARM Generic
+Interrupt Controller's (GIC's) hardware support for virtualization by
+allowing software to inject virtual interrupts to a VM, which the guest
+OS sees as regular interrupts. The code is famously known as the VGIC.
+
+Some of these virtual interrupts, however, correspond to physical
+interrupts from real physical devices. One example could be the
+architected timer, which itself supports virtualization, and therefore
+lets a guest OS program the hardware device directly to raise an
+interrupt at some point in time. When such an interrupt is raised, the
+host OS initially handles the interrupt and must somehow signal this
+event as a virtual interrupt to the guest. Another example could be a
+passthrough device, where the physical interrupts are initially handled
+by the host, but the device driver for the device lives in the guest OS
+and KVM must therefore somehow inject a virtual interrupt on behalf of
+the physical one to the guest OS.
+
+These virtual interrupts corresponding to a physical interrupt on the
+host are called forwarded physical interrupts, but are also sometimes
+referred to as 'virtualized physical interrupts' and 'mapped interrupts'.
+
+Forwarded physical interrupts are handled slightly differently compared
+to virtual interrupts generated purely by a software emulated device.
+
+
+The HW bit
+----------
+Virtual interrupts are signalled to the guest by programming the List
+Registers (LRs) on the GIC before running a VCPU. The LR is programmed
+with the virtual IRQ number and the state of the interrupt (Pending,
+Active, or Pending+Active). When the guest ACKs and EOIs a virtual
+interrupt, the LR state moves from Pending to Active, and finally to
+inactive.
+
+The LRs include an extra bit, called the HW bit. When this bit is set,
+KVM must also program an additional field in the LR, the physical IRQ
+number, to link the virtual with the physical IRQ.
+
+When the HW bit is set, KVM must EITHER set the Pending OR the Active
+bit, never both at the same time.
+
+Setting the HW bit causes the hardware to deactivate the physical
+interrupt on the physical distributor when the guest deactivates the
+corresponding virtual interrupt.
+
+
+Forwarded Physical Interrupts Life Cycle
+----------------------------------------
+
+The state of forwarded physical interrupts is managed in the following way:
+
+ - The physical interrupt is acked by the host, and becomes active on
+ the physical distributor (*).
+ - KVM sets the LR.Pending bit, because this is the only way the GICV
+ interface is going to present it to the guest.
+ - LR.Pending will stay set as long as the guest has not acked the interrupt.
+ - LR.Pending transitions to LR.Active on the guest read of the IAR, as
+ expected.
+ - On guest EOI, the *physical distributor* active bit gets cleared,
+ but the LR.Active is left untouched (set).
+ - KVM clears the LR on VM exits when the physical distributor
+ active state has been cleared.
+
+(*): The host handling is slightly more complicated. For some forwarded
+interrupts (shared), KVM directly sets the active state on the physical
+distributor before entering the guest, because the interrupt is never actually
+handled on the host (see details on the timer as an example below). For other
+forwarded interrupts (non-shared) the host does not deactivate the interrupt
+when the host ISR completes, but leaves the interrupt active until the guest
+deactivates it. Leaving the interrupt active is allowed, because Linux
+configures the physical GIC with EOIMode=1, which causes EOI operations to
+perform a priority drop allowing the GIC to receive other interrupts of the
+default priority.
+
+
+Forwarded Edge and Level Triggered PPIs and SPIs
+------------------------------------------------
+Forwarded physical interrupts should always be active on the
+physical distributor when injected to a guest.
+
+Level-triggered interrupts will keep the interrupt line to the GIC
+asserted, typically until the guest programs the device to deassert the
+line. This means that the interrupt will remain pending on the physical
+distributor until the guest has reprogrammed the device. Since we
+always run the VM with interrupts enabled on the CPU, a pending
+interrupt will exit the guest as soon as we switch into the guest,
+preventing the guest from ever making progress as the process repeats
+over and over. Therefore, the active state on the physical distributor
+must be set when entering the guest, preventing the GIC from forwarding
+the pending interrupt to the CPU. As soon as the guest deactivates the
+interrupt, the physical line is sampled by the hardware again and the host
+takes a new interrupt if and only if the physical line is still asserted.
+
+Edge-triggered interrupts do not exhibit the same problem with
+preventing guest execution that level-triggered interrupts do. One
+option is to not use HW bit at all, and inject edge-triggered interrupts
+from a physical device as pure virtual interrupts. But that would
+potentially slow down handling of the interrupt in the guest, because a
+physical interrupt occurring in the middle of the guest ISR would
+preempt the guest for the host to handle the interrupt. Additionally,
+if you configure the system to handle interrupts on a separate physical
+core from that running your VCPU, you still have to interrupt the VCPU
+to queue the pending state onto the LR, even though the guest won't use
+this information until the guest ISR completes. Therefore, the HW
+bit should always be set for forwarded edge-triggered interrupts. With
+the HW bit set, the virtual interrupt is injected and additional
+physical interrupts occurring before the guest deactivates the interrupt
+simply mark the state on the physical distributor as Pending+Active. As
+soon as the guest deactivates the interrupt, the host takes another
+interrupt if and only if there was a physical interrupt between injecting
+the forwarded interrupt to the guest and the guest deactivating the
+interrupt.
+
+Consequently, whenever we schedule a VCPU with one or more LRs with the
+HW bit set, the interrupt must also be active on the physical
+distributor.
+
+
+Forwarded LPIs
+--------------
+LPIs, introduced in GICv3, are always edge-triggered and do not have an
+active state. They become pending when a device signals them, and as
+soon as they are acked by the CPU, they are inactive again.
+
+It therefore doesn't make sense, and is not supported, to set the HW bit
+for physical LPIs that are forwarded to a VM as virtual interrupts,
+typically virtual SPIs.
+
+For LPIs, there is no other choice than to preempt the VCPU thread if
+necessary, and queue the pending state onto the LR.
+
+
+Putting It Together: The Architected Timer
+------------------------------------------
+The architected timer is a device that signals interrupts with level
+triggered semantics. The timer hardware is directly accessed by VCPUs
+which program the timer to fire at some point in time. Each VCPU on a
+system programs the timer to fire at different times, and therefore the
+hardware is multiplexed between multiple VCPUs. This is implemented by
+context-switching the timer state along with each VCPU thread.
+
+However, this means that a scenario like the following is entirely
+possible, and in fact, typical:
+
+1. KVM runs the VCPU
+2. The guest programs the timer to fire in T+100
+3. The guest is idle and calls WFI (wait-for-interrupts)
+4. The hardware traps to the host
+5. KVM stores the timer state to memory and disables the hardware timer
+6. KVM schedules a soft timer to fire in T+(100 - time since step 2)
+7. KVM puts the VCPU thread to sleep (on a waitqueue)
+8. The soft timer fires, waking up the VCPU thread
+9. KVM reprograms the timer hardware with the VCPU's values
+10. KVM marks the timer interrupt as active on the physical distributor
+11. KVM injects a forwarded physical interrupt to the guest
+12. KVM runs the VCPU
+
+Notice that KVM injects a forwarded physical interrupt in step 11 without
+the corresponding interrupt having actually fired on the host. That is
+exactly why we mark the timer interrupt as active in step 10, because
+the active state on the physical distributor is part of the state
+belonging to the timer hardware, which is context-switched along with
+the VCPU thread.
+
+If the guest does not idle because it is busy, the flow looks like this
+instead:
+
+1. KVM runs the VCPU
+2. The guest programs the timer to fire in T+100
+3. At T+100 the timer fires and a physical IRQ causes the VM to exit
+   (note that this initially only traps to EL2 and does not run the host ISR
+   until KVM has returned to the host).
+4. With interrupts still disabled on the CPU coming back from the guest, KVM
+   stores the virtual timer state to memory and disables the virtual hw timer.
+5. KVM looks at the timer state (in memory) and injects a forwarded physical
+   interrupt because it concludes the timer has expired.
+6. KVM marks the timer interrupt as active on the physical distributor
+7. KVM enables the timer, enables interrupts, and runs the VCPU
+
+Notice that again the forwarded physical interrupt is injected to the
+guest without having actually been handled on the host. In this case it
+is because the physical interrupt is never actually seen by the host, since the
+timer is disabled upon guest return, and the virtual forwarded interrupt is
+injected on the KVM guest entry path.
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 3fb905429e8a..59541d49e15c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -44,28 +44,29 @@ Groups:
Attributes:
The attr field of kvm_device_attr encodes two values:
bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 |
- values: | reserved | cpu id | offset |
+ values: | reserved | vcpu_index | offset |
All distributor regs are (rw, 32-bit)
The offset is relative to the "Distributor base address" as defined in the
GICv2 specs. Getting or setting such a register has the same effect as
- reading or writing the register on the actual hardware from the cpu
- specified with cpu id field. Note that most distributor fields are not
- banked, but return the same value regardless of the cpu id used to access
- the register.
+ reading or writing the register on the actual hardware from the cpu whose
+ index is specified with the vcpu_index field. Note that most distributor
+ fields are not banked, but return the same value regardless of the
+ vcpu_index used to access the register.
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
- Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
Errors:
- -ENODEV: Getting or setting this register is not yet supported
+ -ENXIO: Getting or setting this register is not yet supported
-EBUSY: One or more VCPUs are running
+ -EINVAL: Invalid vcpu_index supplied
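+
+  As a hedged userspace sketch (the device fd and the vcpu_index/offset values
+  are placeholders), the attr is composed from the fields described above:
+
+    __u32 regval;
+    struct kvm_device_attr attr = {
+            .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+            .attr  = ((__u64)vcpu_index << 32) | offset, /* bits 39..32 | 31..0 */
+            .addr  = (__u64)(unsigned long)&regval,
+    };
+
+    if (ioctl(vgic_device_fd, KVM_GET_DEVICE_ATTR, &attr) < 0)
+            perror("KVM_GET_DEVICE_ATTR");
+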
KVM_DEV_ARM_VGIC_GRP_CPU_REGS
Attributes:
The attr field of kvm_device_attr encodes two values:
bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 |
- values: | reserved | cpu id | offset |
+ values: | reserved | vcpu_index | offset |
All CPU interface regs are (rw, 32-bit)
@@ -91,8 +92,9 @@ Groups:
- Priorities are not implemented, and registers are RAZ/WI
- Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
Errors:
- -ENODEV: Getting or setting this register is not yet supported
+ -ENXIO: Getting or setting this register is not yet supported
-EBUSY: One or more VCPUs are running
+ -EINVAL: Invalid vcpu_index supplied
KVM_DEV_ARM_VGIC_GRP_NR_IRQS
Attributes:
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 5542c4641a3c..2d09d1ed86d0 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -74,7 +74,7 @@ struct kvm_s390_vm_cpu_processor {
KVM does not enforce or limit the cpu model data in any form. Take the information
retrieved by means of KVM_S390_VM_CPU_MACHINE as hint for reasonable configuration
-setups. Instruction interceptions triggered by additionally set facilitiy bits that
+setups. Instruction interceptions triggered by additionally set facility bits that
are not handled by KVM need to be implemented in the VM driver code.
Parameters: address of buffer to store/set the processor related cpu
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index d68af4dc3006..19f94a6b9bb0 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -166,3 +166,15 @@ Comment: The srcu read lock must be held while accessing memslots (e.g.
MMIO/PIO address->device structure mapping (kvm->buses).
The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
if it is needed by multiple functions.
+
+Name: blocked_vcpu_on_cpu_lock
+Type: spinlock_t
+Arch: x86
+Protects: blocked_vcpu_on_cpu
+Comment: This is a per-CPU lock and it is used for VT-d posted-interrupts.
+	When VT-d posted-interrupts are supported and the VM has assigned
+	devices, we put the blocked vCPU on the blocked_vcpu_on_cpu list,
+	protected by blocked_vcpu_on_cpu_lock. When the VT-d hardware
+	issues a wakeup notification event because an external interrupt
+	from an assigned device arrives, we find the vCPU on the list
+	and wake it up.
diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt
index 319560646f32..e26115ce4258 100644
--- a/Documentation/virtual/kvm/ppc-pv.txt
+++ b/Documentation/virtual/kvm/ppc-pv.txt
@@ -110,7 +110,7 @@ Flags are passed to the host in the low 12 bits of the Effective Address.
The following flags are currently available for a guest to expose:
- MAGIC_PAGE_FLAG_NOT_MAPPED_NX Guest handles NX bits correclty wrt magic page
+ MAGIC_PAGE_FLAG_NOT_MAPPED_NX Guest handles NX bits correctly wrt magic page
MSR bits
========
diff --git a/Documentation/zh_CN/filesystems/sysfs.txt b/Documentation/zh_CN/filesystems/sysfs.txt
index e230eaa33122..7d3b05edb8ce 100644
--- a/Documentation/zh_CN/filesystems/sysfs.txt
+++ b/Documentation/zh_CN/filesystems/sysfs.txt
@@ -61,7 +61,7 @@ Documentation/kobject.txt 文档以获得更多关于 kobject 接口的
内核的对象层次到用户空间。sysfs 中的顶层目录代表着内核对象层次的
共同祖先;例如:某些对象属于某个子系统。
-Sysfs 在与其目录关联的 sysfs_dirent 对象中内部保存一个指向实现
+Sysfs 在与其目录关联的 kernfs_node 对象中内部保存一个指向实现
目录的 kobject 的指针。以前,这个 kobject 指针被 sysfs 直接用于
kobject 文件打开和关闭的引用计数。而现在的 sysfs 实现中,kobject
引用计数只能通过 sysfs_schedule_callback() 函数直接修改。
diff --git a/MAINTAINERS b/MAINTAINERS
index e887dbb44431..7301ae17ec63 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1489,6 +1489,14 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-tv/
+ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
+M: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+M: Jacek Anaszewski <j.anaszewski@samsung.com>
+L: linux-arm-kernel@lists.infradead.org
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/s5p-jpeg/
+
ARM/SHMOBILE ARM ARCHITECTURE
M: Simon Horman <horms@verge.net.au>
M: Magnus Damm <magnus.damm@gmail.com>
@@ -3178,6 +3186,15 @@ F: Documentation/powerpc/cxl.txt
F: Documentation/powerpc/cxl.txt
F: Documentation/ABI/testing/sysfs-class-cxl
+CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
+M: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+M: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/cxlflash/
+F: include/uapi/scsi/cxlflash_ioctls.h
+F: Documentation/powerpc/cxlflash.txt
+
STMMAC ETHERNET DRIVER
M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
L: netdev@vger.kernel.org
@@ -3520,13 +3537,15 @@ M: Jonathan Corbet <corbet@lwn.net>
L: linux-doc@vger.kernel.org
S: Maintained
F: Documentation/
+F: scripts/docproc.c
+F: scripts/kernel-doc*
X: Documentation/ABI/
X: Documentation/devicetree/
X: Documentation/acpi
X: Documentation/power
X: Documentation/spi
X: Documentation/DocBook/media
-T: git git://git.lwn.net/linux-2.6.git docs-next
+T: git git://git.lwn.net/linux.git docs-next
DOUBLETALK DRIVER
M: "James R. Van Zandt" <jrv@vanzandt.mv.com>
@@ -7497,10 +7516,10 @@ NOKIA N900 POWER SUPPLY DRIVERS
M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
F: include/linux/power/bq2415x_charger.h
-F: include/linux/power/bq27x00_battery.h
+F: include/linux/power/bq27xxx_battery.h
F: include/linux/power/isp1704_charger.h
F: drivers/power/bq2415x_charger.c
-F: drivers/power/bq27x00_battery.c
+F: drivers/power/bq27xxx_battery.c
F: drivers/power/isp1704_charger.c
F: drivers/power/rx51_battery.c
@@ -9068,6 +9087,13 @@ F: drivers/s390/net/*iucv*
F: include/net/iucv/
F: net/iucv/
+S390 IOMMU (PCI)
+M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+F: drivers/iommu/s390-iommu.c
+
S3C24XX SD/MMC Driver
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9456,8 +9482,8 @@ F: include/uapi/linux/phantom.h
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
-M: Minh Tran <minh.tran@avagotech.com>
-M: John Soni Jose <sony.john-n@avagotech.com>
+M: Ketan Mukadam <ketan.mukadam@avagotech.com>
+M: John Soni Jose <sony.john@avagotech.com>
L: linux-scsi@vger.kernel.org
W: http://www.avagotech.com
S: Supported
@@ -10730,6 +10756,7 @@ F: drivers/media/pci/tw68/
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
M: Marcel Selhorst <tpmdd@selhorst.net>
+M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
W: http://tpmdd.sourceforge.net
L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
@@ -11321,6 +11348,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/via/via-velocity.*
+VIRT LIB
+M: Alex Williamson <alex.williamson@redhat.com>
+M: Paolo Bonzini <pbonzini@redhat.com>
+L: kvm@vger.kernel.org
+S: Supported
+F: virt/lib/
+
VIVID VIRTUAL VIDEO DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
diff --git a/Makefile b/Makefile
index d5b37391195f..69be581e7c7a 100644
--- a/Makefile
+++ b/Makefile
@@ -550,6 +550,7 @@ drivers-y := drivers/ sound/ firmware/
net-y := net/
libs-y := lib/
core-y := usr/
+virt-y := virt/
endif # KBUILD_EXTMOD
ifeq ($(dot-config),1)
@@ -882,10 +883,10 @@ core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
- $(net-y) $(net-m) $(libs-y) $(libs-m)))
+ $(net-y) $(net-m) $(libs-y) $(libs-m) $(virt-y)))
vmlinux-alldirs := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
- $(init-) $(core-) $(drivers-) $(net-) $(libs-))))
+ $(init-) $(core-) $(drivers-) $(net-) $(libs-) $(virt-))))
init-y := $(patsubst %/, %/built-in.o, $(init-y))
core-y := $(patsubst %/, %/built-in.o, $(core-y))
@@ -894,14 +895,15 @@ net-y := $(patsubst %/, %/built-in.o, $(net-y))
libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
libs-y := $(libs-y1) $(libs-y2)
+virt-y := $(patsubst %/, %/built-in.o, $(virt-y))
# Externally visible symbols (used by link-vmlinux.sh)
export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
-export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y) $(virt-y)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/package/Makefile
-export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools virt)
+export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
@@ -1336,7 +1338,7 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
%docs: scripts_basic FORCE
- $(Q)$(MAKE) $(build)=scripts build_docproc
+ $(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype
$(Q)$(MAKE) $(build)=Documentation/DocBook $@
else # KBUILD_EXTMOD
diff --git a/README b/README
index a326a6a6a46f..f4756ee1c918 100644
--- a/README
+++ b/README
@@ -24,7 +24,7 @@ ON WHAT HARDWARE DOES IT RUN?
today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
- Xtensa, Tilera TILE, AVR32 and Renesas M32R architectures.
+ Xtensa, Tilera TILE, AVR32, ARC and Renesas M32R architectures.
Linux is easily portable to most general-purpose 32- or 64-bit architectures
as long as they have a paged memory management unit (PMMU) and a port of the
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index 36ae9160b558..482b7aa37808 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -22,6 +22,8 @@
charger: bci {
compatible = "ti,twl4030-bci";
interrupts = <9>, <2>;
+ io-channels = <&twl4030_madc 11>;
+ io-channel-name = "vac";
bci3v1-supply = <&vusb3v1>;
};
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 3f15a5cae167..c5e1943e5427 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -246,7 +246,7 @@ CONFIG_GPIO_TWL4030=y
CONFIG_GPIO_PALMAS=y
CONFIG_W1=m
CONFIG_HDQ_MASTER_OMAP=m
-CONFIG_BATTERY_BQ27x00=m
+CONFIG_BATTERY_BQ27XXX=m
CONFIG_CHARGER_ISP1704=m
CONFIG_CHARGER_TWL4030=m
CONFIG_CHARGER_BQ2415X=m
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index d995821f1698..dc641ddf0784 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -218,4 +218,24 @@
#define HSR_DABT_CM (1U << 8)
#define HSR_DABT_EA (1U << 9)
+#define kvm_arm_exception_type \
+ {0, "RESET" }, \
+ {1, "UNDEFINED" }, \
+ {2, "SOFTWARE" }, \
+ {3, "PREF_ABORT" }, \
+ {4, "DATA_ABORT" }, \
+ {5, "IRQ" }, \
+ {6, "FIQ" }, \
+ {7, "HVC" }
+
+#define HSRECN(x) { HSR_EC_##x, #x }
+
+#define kvm_arm_exception_class \
+ HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
+ HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
+ HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
+ HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
+ HSRECN(DABT), HSRECN(DABT_HYP)
+
+
#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c4072d9f32c7..6692982c9b57 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -126,7 +126,10 @@ struct kvm_vcpu_arch {
* here.
*/
- /* Don't run the guest on this vcpu */
+ /* vcpu power-off state */
+ bool power_off;
+
+ /* Don't run the guest (internal implementation need) */
bool pause;
/* IO related fields */
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 356970f3b25e..95a000515e43 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -46,4 +46,6 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
+source drivers/vhost/Kconfig
+
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 78b286994577..eab83b2435b8 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -271,6 +271,16 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvm_timer_should_fire(vcpu);
}
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_schedule(vcpu);
+}
+
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_unschedule(vcpu);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
/* Force users to call KVM_ARM_VCPU_INIT */
@@ -308,7 +318,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- if (vcpu->arch.pause)
+ if (vcpu->arch.power_off)
mp_state->mp_state = KVM_MP_STATE_STOPPED;
else
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
@@ -321,10 +331,10 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.pause = false;
+ vcpu->arch.power_off = false;
break;
case KVM_MP_STATE_STOPPED:
- vcpu->arch.pause = true;
+ vcpu->arch.power_off = true;
break;
default:
return -EINVAL;
@@ -342,7 +352,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
*/
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
+ return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
+ && !v->arch.power_off && !v->arch.pause);
}
/* Just ensure a guest exit from a particular CPU */
@@ -468,11 +479,38 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
return vgic_initialized(kvm);
}
-static void vcpu_pause(struct kvm_vcpu *vcpu)
+static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
+static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
+
+static void kvm_arm_halt_guest(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ vcpu->arch.pause = true;
+ force_vm_exit(cpu_all_mask);
+}
+
+static void kvm_arm_resume_guest(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+ vcpu->arch.pause = false;
+ wake_up_interruptible(wq);
+ }
+}
+
+static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
- wait_event_interruptible(*wq, !vcpu->arch.pause);
+ wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ (!vcpu->arch.pause)));
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -522,8 +560,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
update_vttbr(vcpu->kvm);
- if (vcpu->arch.pause)
- vcpu_pause(vcpu);
+ if (vcpu->arch.power_off || vcpu->arch.pause)
+ vcpu_sleep(vcpu);
/*
* Disarming the background timer must be done in a
@@ -549,11 +587,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_INTR;
}
- if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
+ if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
+ vcpu->arch.power_off || vcpu->arch.pause) {
local_irq_enable();
+ kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
- kvm_timer_sync_hwstate(vcpu);
continue;
}
@@ -596,14 +635,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* guest time.
*/
kvm_guest_exit();
- trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+
+ /*
+ * We must sync the timer state before the vgic state so that
+ * the vgic can properly sample the updated state of the
+ * interrupt line.
+ */
+ kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
- kvm_timer_sync_hwstate(vcpu);
-
ret = handle_exit(vcpu, run, ret);
}
@@ -765,12 +809,12 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
vcpu_reset_hcr(vcpu);
/*
- * Handle the "start in power-off" case by marking the VCPU as paused.
+ * Handle the "start in power-off" case.
*/
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- vcpu->arch.pause = true;
+ vcpu->arch.power_off = true;
else
- vcpu->arch.pause = false;
+ vcpu->arch.power_off = false;
return 0;
}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index ad6f6424f1d1..0b556968a6da 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -63,7 +63,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.pause = true;
+ vcpu->arch.power_off = true;
}
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
@@ -87,7 +87,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
- if (!vcpu->arch.pause) {
+ if (!vcpu->arch.power_off) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
@@ -115,7 +115,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* the general purpose registers are undefined upon CPU_ON.
*/
*vcpu_reg(vcpu, 0) = context_id;
- vcpu->arch.pause = false;
+ vcpu->arch.power_off = false;
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
@@ -153,7 +153,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
mpidr = kvm_vcpu_get_mpidr_aff(tmp);
if ((mpidr & target_affinity_mask) == target_affinity) {
matching_cpus++;
- if (!tmp->arch.pause)
+ if (!tmp->arch.power_off)
return PSCI_0_2_AFFINITY_LEVEL_ON;
}
}
@@ -179,7 +179,7 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
* re-initialized.
*/
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- tmp->arch.pause = true;
+ tmp->arch.power_off = true;
kvm_vcpu_kick(tmp);
}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 0ec35392d208..c25a88598eb0 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -25,21 +25,25 @@ TRACE_EVENT(kvm_entry,
);
TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
- TP_ARGS(exit_reason, vcpu_pc),
+ TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
+ TP_ARGS(idx, exit_reason, vcpu_pc),
TP_STRUCT__entry(
+ __field( int, idx )
__field( unsigned int, exit_reason )
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
+ __entry->idx = idx;
__entry->exit_reason = exit_reason;
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ __print_symbolic(__entry->idx, kvm_arm_exception_type),
__entry->exit_reason,
+ __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
__entry->vcpu_pc)
);
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 89a755b90db2..92673006e55c 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -102,6 +102,9 @@ config HAVE_AT91_SMD
config HAVE_AT91_H32MX
bool
+config HAVE_AT91_GENERATED_CLK
+ bool
+
config SOC_SAM_V4_V5
bool
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 1319c3c14327..84bd26535ae9 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -14,7 +14,7 @@ config ARCH_BCM_IPROC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select ARM_GLOBAL_TIMER
-
+ select COMMON_CLK_IPROC
select CLKSRC_MMIO
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
diff --git a/arch/arm/mach-u300/dummyspichip.c b/arch/arm/mach-u300/dummyspichip.c
index 131996805690..68fe986ca42e 100644
--- a/arch/arm/mach-u300/dummyspichip.c
+++ b/arch/arm/mach-u300/dummyspichip.c
@@ -264,7 +264,6 @@ static const struct of_device_id pl022_dummy_dt_match[] = {
static struct spi_driver pl022_dummy_driver = {
.driver = {
.name = "spi-dummy",
- .owner = THIS_MODULE,
.of_match_table = pl022_dummy_dt_match,
},
.probe = pl022_dummy_probe,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7b10647cab22..851fe11c6069 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -76,6 +76,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
+ select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index cfdb34bedbcd..54d0ead41afc 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -54,16 +54,15 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- struct iommu_ops *iommu, bool coherent)
-{
- if (!acpi_disabled && !dev->archdata.dma_ops)
- dev->archdata.dma_ops = dma_ops;
-
- dev->archdata.dma_coherent = coherent;
-}
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
+#ifdef CONFIG_IOMMU_DMA
+void arch_teardown_dma_ops(struct device *dev);
+#define arch_teardown_dma_ops arch_teardown_dma_ops
+#endif
+
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 9694f2654593..5e6857b6bdc4 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -200,4 +200,20 @@
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
+#define kvm_arm_exception_type \
+ {0, "IRQ" }, \
+ {1, "TRAP" }
+
+#define ECN(x) { ESR_ELx_EC_##x, #x }
+
+#define kvm_arm_exception_class \
+ ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
+ ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(CP14_64), ECN(SVC64), \
+ ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(IMP_DEF), ECN(IABT_LOW), \
+ ECN(IABT_CUR), ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
+ ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
+ ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
+ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
+ ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
+
#endif /* __ARM64_KVM_ARM_H__ */
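The kvm_arm_exception_type and kvm_arm_exception_class tables above feed __print_symbolic() in the reworked kvm_exit tracepoint, so a raw HSR exception class is rendered with a readable name in the trace output. The following standalone userspace sketch is illustrative only (the table entries shown are a hypothetical subset of the ESR_ELx encodings, not the kernel trace infrastructure); it performs the same value-to-name lookup the tracepoint does:

/*
 * Illustrative userspace sketch: mimic how a { value, "NAME" } table,
 * as used by __print_symbolic() with kvm_arm_exception_class above,
 * turns a numeric exception class into a readable string.
 */
#include <stdio.h>

struct sym { unsigned int val; const char *name; };

static const struct sym exc_class[] = {
	{ 0x01, "WFx" },
	{ 0x16, "HVC64" },
	{ 0x24, "DABT_LOW" },
};

static const char *sym_name(unsigned int val)
{
	for (size_t i = 0; i < sizeof(exc_class) / sizeof(exc_class[0]); i++)
		if (exc_class[i].val == val)
			return exc_class[i].name;
	return "UNKNOWN";
}

int main(void)
{
	/* e.g. a trap with HSR_EC 0x24 would print as DABT_LOW */
	printf("HSR_EC: 0x%04x (%s)\n", 0x24u, sym_name(0x24));
	return 0;
}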
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ed039688c221..a35ce7266aac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -149,7 +149,10 @@ struct kvm_vcpu_arch {
u32 mdscr_el1;
} guest_debug_preserved;
- /* Don't run the guest */
+ /* vcpu power-off state */
+ bool power_off;
+
+ /* Don't run the guest (internal implementation need) */
bool pause;
/* IO related fields */
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index c9d1f34daab1..a5272c07d1cb 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -48,4 +48,6 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
+source drivers/vhost/Kconfig
+
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index e5836138ec42..1599701ef044 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -880,6 +880,14 @@ __kvm_hyp_panic:
bl __restore_sysregs
+ /*
+ * Make sure we have a valid host stack, and don't leave junk in the
+ * frame pointer that will give us a misleading host stack unwinding.
+ */
+ ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
+ msr sp_el1, x22
+ mov x29, xzr
+
1: adr x0, __hyp_panic_str
adr x1, 2f
ldp x2, x3, [x1]
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 99224dcebdc5..6320361d8d4c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -533,3 +533,460 @@ static int __init dma_debug_do_init(void)
return 0;
}
fs_initcall(dma_debug_do_init);
+
+
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/dma-iommu.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+
+/* Thankfully, all cache ops are by VA so we can ignore phys here */
+static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
+{
+ __dma_flush_range(virt, virt + PAGE_SIZE);
+}
+
+static void *__iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ bool coherent = is_device_dma_coherent(dev);
+ int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+ void *addr;
+
+ if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
+ return NULL;
+ /*
+ * Some drivers rely on this, and we probably don't want the
+ * possibility of stale kernel data being read by devices anyway.
+ */
+ gfp |= __GFP_ZERO;
+
+ if (gfp & __GFP_WAIT) {
+ struct page **pages;
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+
+ pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+ flush_page);
+ if (!pages)
+ return NULL;
+
+ addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+ __builtin_return_address(0));
+ if (!addr)
+ iommu_dma_free(dev, pages, size, handle);
+ } else {
+ struct page *page;
+ /*
+ * In atomic context we can't remap anything, so we'll only
+ * get the virtually contiguous buffer we need by way of a
+ * physically contiguous allocation.
+ */
+ if (coherent) {
+ page = alloc_pages(gfp, get_order(size));
+ addr = page ? page_address(page) : NULL;
+ } else {
+ addr = __alloc_from_pool(size, &page, gfp);
+ }
+ if (!addr)
+ return NULL;
+
+ *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+ if (iommu_dma_mapping_error(dev, *handle)) {
+ if (coherent)
+ __free_pages(page, get_order(size));
+ else
+ __free_from_pool(addr, size);
+ addr = NULL;
+ }
+ }
+ return addr;
+}
+
+static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ /*
+ * @cpu_addr will be one of 3 things depending on how it was allocated:
+ * - A remapped array of pages from iommu_dma_alloc(), for all
+ * non-atomic allocations.
+ * - A non-cacheable alias from the atomic pool, for atomic
+ * allocations by non-coherent devices.
+ * - A normal lowmem address, for atomic allocations by
+ * coherent devices.
+ * Hence the somewhat dodgy-looking logic below...
+ */
+ if (__in_atomic_pool(cpu_addr, size)) {
+ iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+ __free_from_pool(cpu_addr, size);
+ } else if (is_vmalloc_addr(cpu_addr)) {
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (WARN_ON(!area || !area->pages))
+ return;
+ iommu_dma_free(dev, area->pages, size, &handle);
+ dma_common_free_remap(cpu_addr, size, VM_USERMAP);
+ } else {
+ iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+ __free_pages(virt_to_page(cpu_addr), get_order(size));
+ }
+}
+
+static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct vm_struct *area;
+ int ret;
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ area = find_vm_area(cpu_addr);
+ if (WARN_ON(!area || !area->pages))
+ return -ENXIO;
+
+ return iommu_dma_mmap(area->pages, size, vma);
+}
+
+static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (WARN_ON(!area || !area->pages))
+ return -ENXIO;
+
+ return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
+ GFP_KERNEL);
+}
+
+static void __iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ phys_addr_t phys;
+
+ if (is_device_dma_coherent(dev))
+ return;
+
+ phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
+ __dma_unmap_area(phys_to_virt(phys), size, dir);
+}
+
+static void __iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ phys_addr_t phys;
+
+ if (is_device_dma_coherent(dev))
+ return;
+
+ phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
+ __dma_map_area(phys_to_virt(phys), size, dir);
+}
+
+static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ bool coherent = is_device_dma_coherent(dev);
+ int prot = dma_direction_to_prot(dir, coherent);
+ dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
+
+ if (!iommu_dma_mapping_error(dev, dev_addr) &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __iommu_sync_single_for_device(dev, dev_addr, size, dir);
+
+ return dev_addr;
+}
+
+static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
+
+ iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void __iommu_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (is_device_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(sg_virt(sg), sg->length, dir);
+}
+
+static void __iommu_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (is_device_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_map_area(sg_virt(sg), sg->length, dir);
+}
+
+static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ bool coherent = is_device_dma_coherent(dev);
+
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
+
+ return iommu_dma_map_sg(dev, sgl, nelems,
+ dma_direction_to_prot(dir, coherent));
+}
+
+static void __iommu_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
+
+ iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
+}
+
+static struct dma_map_ops iommu_dma_ops = {
+ .alloc = __iommu_alloc_attrs,
+ .free = __iommu_free_attrs,
+ .mmap = __iommu_mmap_attrs,
+ .get_sgtable = __iommu_get_sgtable,
+ .map_page = __iommu_map_page,
+ .unmap_page = __iommu_unmap_page,
+ .map_sg = __iommu_map_sg_attrs,
+ .unmap_sg = __iommu_unmap_sg_attrs,
+ .sync_single_for_cpu = __iommu_sync_single_for_cpu,
+ .sync_single_for_device = __iommu_sync_single_for_device,
+ .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = __iommu_sync_sg_for_device,
+ .dma_supported = iommu_dma_supported,
+ .mapping_error = iommu_dma_mapping_error,
+};
+
+/*
+ * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
+ * everything it needs to - the device is only partially created and the
+ * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
+ * need this delayed attachment dance. Once IOMMU probe ordering is sorted
+ * to move the arch_setup_dma_ops() call later, all the notifier bits below
+ * become unnecessary, and will go away.
+ */
+struct iommu_dma_notifier_data {
+ struct list_head list;
+ struct device *dev;
+ const struct iommu_ops *ops;
+ u64 dma_base;
+ u64 size;
+};
+static LIST_HEAD(iommu_dma_masters);
+static DEFINE_MUTEX(iommu_dma_notifier_lock);
+
+/*
+ * Temporarily "borrow" a domain feature flag to tell if we had to resort
+ * to creating our own domain here, in case we need to clean it up again.
+ */
+#define __IOMMU_DOMAIN_FAKE_DEFAULT (1U << 31)
+
+static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
+ u64 dma_base, u64 size)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ /*
+ * Best case: The device is either part of a group which was
+ * already attached to a domain in a previous call, or it's
+ * been put in a default DMA domain by the IOMMU core.
+ */
+ if (!domain) {
+ /*
+ * Urgh. The IOMMU core isn't going to do default domains
+ * for non-PCI devices anyway, until it has some means of
+ * abstracting the entirely implementation-specific
+ * sideband data/SoC topology/unicorn dust that may or
+ * may not differentiate upstream masters.
+ * So until then, HORRIBLE HACKS!
+ */
+ domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
+ if (!domain)
+ goto out_no_domain;
+
+ domain->ops = ops;
+ domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;
+
+ if (iommu_attach_device(domain, dev))
+ goto out_put_domain;
+ }
+
+ if (iommu_dma_init_domain(domain, dma_base, size))
+ goto out_detach;
+
+ dev->archdata.dma_ops = &iommu_dma_ops;
+ return true;
+
+out_detach:
+ iommu_detach_device(domain, dev);
+out_put_domain:
+ if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
+ iommu_domain_free(domain);
+out_no_domain:
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ return false;
+}
+
+static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
+ u64 dma_base, u64 size)
+{
+ struct iommu_dma_notifier_data *iommudata;
+
+ iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
+ if (!iommudata)
+ return;
+
+ iommudata->dev = dev;
+ iommudata->ops = ops;
+ iommudata->dma_base = dma_base;
+ iommudata->size = size;
+
+ mutex_lock(&iommu_dma_notifier_lock);
+ list_add(&iommudata->list, &iommu_dma_masters);
+ mutex_unlock(&iommu_dma_notifier_lock);
+}
+
+static int __iommu_attach_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct iommu_dma_notifier_data *master, *tmp;
+
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ mutex_lock(&iommu_dma_notifier_lock);
+ list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
+ if (do_iommu_attach(master->dev, master->ops,
+ master->dma_base, master->size)) {
+ list_del(&master->list);
+ kfree(master);
+ }
+ }
+ mutex_unlock(&iommu_dma_notifier_lock);
+ return 0;
+}
+
+static int register_iommu_dma_ops_notifier(struct bus_type *bus)
+{
+ struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
+ int ret;
+
+ if (!nb)
+ return -ENOMEM;
+ /*
+ * The device must be attached to a domain before the driver probe
+ * routine gets a chance to start allocating DMA buffers. However,
+ * the IOMMU driver also needs a chance to configure the iommu_group
+ * via its add_device callback first, so we need to make the attach
+ * happen between those two points. Since the IOMMU core uses a bus
+ * notifier with default priority for add_device, do the same but
+ * with a lower priority to ensure the appropriate ordering.
+ */
+ nb->notifier_call = __iommu_attach_notifier;
+ nb->priority = -100;
+
+ ret = bus_register_notifier(bus, nb);
+ if (ret) {
+ pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
+ bus->name);
+ kfree(nb);
+ }
+ return ret;
+}
+
+static int __init __iommu_dma_init(void)
+{
+ int ret;
+
+ ret = iommu_dma_init();
+ if (!ret)
+ ret = register_iommu_dma_ops_notifier(&platform_bus_type);
+ if (!ret)
+ ret = register_iommu_dma_ops_notifier(&amba_bustype);
+ return ret;
+}
+arch_initcall(__iommu_dma_init);
+
+static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *ops)
+{
+ struct iommu_group *group;
+
+ if (!ops)
+ return;
+ /*
+ * TODO: As a concession to the future, we're ready to handle being
+ * called both early and late (i.e. after bus_add_device). Once all
+ * the platform bus code is reworked to call us late and the notifier
+ * junk above goes away, move the body of do_iommu_attach here.
+ */
+ group = iommu_group_get(dev);
+ if (group) {
+ do_iommu_attach(dev, ops, dma_base, size);
+ iommu_group_put(group);
+ } else {
+ queue_iommu_attach(dev, ops, dma_base, size);
+ }
+}
+
+void arch_teardown_dma_ops(struct device *dev)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (domain) {
+ iommu_detach_device(domain, dev);
+ if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
+ iommu_domain_free(domain);
+ }
+
+ dev->archdata.dma_ops = NULL;
+}
+
+#else
+
+static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+{ }
+
+#endif /* CONFIG_IOMMU_DMA */
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
+{
+ if (!acpi_disabled && !dev->archdata.dma_ops)
+ dev->archdata.dma_ops = dma_ops;
+
+ dev->archdata.dma_coherent = coherent;
+ __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+}
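The comment in register_iommu_dma_ops_notifier() above leans on notifier priorities: the IOMMU core's add_device work runs at the default priority (0), while the arch notifier registers at -100 so it is always called afterwards and can rely on the iommu_group already existing. A minimal userspace sketch of that ordering follows; the callback names are hypothetical and this is not the kernel notifier implementation, just the dispatch-by-descending-priority idea:

#include <stdio.h>

/* Hypothetical notifier entry: higher priority runs first, mirroring
 * how struct notifier_block chains are ordered in the kernel. */
struct notifier {
	int priority;
	void (*call)(const char *dev);
};

static void iommu_core_add_device(const char *dev)
{
	printf("[prio    0] IOMMU core: add_device for %s\n", dev);
}

static void iommu_dma_attach(const char *dev)
{
	printf("[prio -100] arch code: attach %s to DMA domain\n", dev);
}

int main(void)
{
	/* Kept sorted by descending priority, so the default-priority
	 * callback sees the device before the -100 one does. */
	struct notifier chain[] = {
		{ 0,    iommu_core_add_device },
		{ -100, iommu_dma_attach },
	};

	for (unsigned int i = 0; i < 2; i++)
		chain[i].call("dev0");
	return 0;
}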
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index ad448e41e3bd..232385441e46 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -18,29 +18,6 @@
#include <bcm63xx_dev_spi.h>
#include <bcm63xx_regs.h>
-/*
- * register offsets
- */
-static const unsigned long bcm6348_regs_spi[] = {
- __GEN_SPI_REGS_TABLE(6348)
-};
-
-static const unsigned long bcm6358_regs_spi[] = {
- __GEN_SPI_REGS_TABLE(6358)
-};
-
-const unsigned long *bcm63xx_regs_spi;
-EXPORT_SYMBOL(bcm63xx_regs_spi);
-
-static __init void bcm63xx_spi_regs_init(void)
-{
- if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
- bcm63xx_regs_spi = bcm6348_regs_spi;
- if (BCMCPU_IS_3368() || BCMCPU_IS_6358() ||
- BCMCPU_IS_6362() || BCMCPU_IS_6368())
- bcm63xx_regs_spi = bcm6358_regs_spi;
-}
-
static struct resource spi_resources[] = {
{
.start = -1, /* filled at runtime */
@@ -53,19 +30,10 @@ static struct resource spi_resources[] = {
},
};
-static struct bcm63xx_spi_pdata spi_pdata = {
- .bus_num = 0,
- .num_chipselect = 8,
-};
-
static struct platform_device bcm63xx_spi_device = {
- .name = "bcm63xx-spi",
.id = -1,
.num_resources = ARRAY_SIZE(spi_resources),
.resource = spi_resources,
- .dev = {
- .platform_data = &spi_pdata,
- },
};
int __init bcm63xx_spi_register(void)
@@ -78,21 +46,15 @@ int __init bcm63xx_spi_register(void)
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
+ bcm63xx_spi_device.name = "bcm6348-spi";
spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
- spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
- spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
- spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
}
if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6362() ||
BCMCPU_IS_6368()) {
+ bcm63xx_spi_device.name = "bcm6358-spi";
spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
- spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
- spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
- spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
}
- bcm63xx_spi_regs_init();
-
return platform_device_register(&bcm63xx_spi_device);
}
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 5a1a882e0a75..6ded8d347af9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -847,5 +847,7 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index 25737655d141..dd299548860d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -7,48 +7,4 @@
int __init bcm63xx_spi_register(void);
-struct bcm63xx_spi_pdata {
- unsigned int fifo_size;
- unsigned int msg_type_shift;
- unsigned int msg_ctl_width;
- int bus_num;
- int num_chipselect;
-};
-
-enum bcm63xx_regs_spi {
- SPI_CMD,
- SPI_INT_STATUS,
- SPI_INT_MASK_ST,
- SPI_INT_MASK,
- SPI_ST,
- SPI_CLK_CFG,
- SPI_FILL_BYTE,
- SPI_MSG_TAIL,
- SPI_RX_TAIL,
- SPI_MSG_CTL,
- SPI_MSG_DATA,
- SPI_RX_DATA,
-};
-
-#define __GEN_SPI_REGS_TABLE(__cpu) \
- [SPI_CMD] = SPI_## __cpu ##_CMD, \
- [SPI_INT_STATUS] = SPI_## __cpu ##_INT_STATUS, \
- [SPI_INT_MASK_ST] = SPI_## __cpu ##_INT_MASK_ST, \
- [SPI_INT_MASK] = SPI_## __cpu ##_INT_MASK, \
- [SPI_ST] = SPI_## __cpu ##_ST, \
- [SPI_CLK_CFG] = SPI_## __cpu ##_CLK_CFG, \
- [SPI_FILL_BYTE] = SPI_## __cpu ##_FILL_BYTE, \
- [SPI_MSG_TAIL] = SPI_## __cpu ##_MSG_TAIL, \
- [SPI_RX_TAIL] = SPI_## __cpu ##_RX_TAIL, \
- [SPI_MSG_CTL] = SPI_## __cpu ##_MSG_CTL, \
- [SPI_MSG_DATA] = SPI_## __cpu ##_MSG_DATA, \
- [SPI_RX_DATA] = SPI_## __cpu ##_RX_DATA,
-
-static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
-{
- extern const unsigned long *bcm63xx_regs_spi;
-
- return bcm63xx_regs_spi[reg];
-}
-
#endif /* BCM63XX_DEV_SPI_H */
diff --git a/arch/mips/txx9/generic/spi_eeprom.c b/arch/mips/txx9/generic/spi_eeprom.c
index 3dbad99d5611..d833dd2c9b55 100644
--- a/arch/mips/txx9/generic/spi_eeprom.c
+++ b/arch/mips/txx9/generic/spi_eeprom.c
@@ -80,7 +80,6 @@ static int __init early_seeprom_probe(struct spi_device *spi)
static struct spi_driver early_seeprom_driver __initdata = {
.driver = {
.name = "at25",
- .owner = THIS_MODULE,
},
.probe = early_seeprom_probe,
};
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 6330a61b875a..4852e849128b 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -42,6 +42,11 @@ static inline unsigned int get_dcrn(u32 inst)
return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}
+static inline unsigned int get_tmrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
static inline unsigned int get_rt(u32 inst)
{
return (inst >> 21) & 0x1f;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 887c259556df..cfa758c6b4f6 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -716,5 +716,7 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslot
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 16547efa2d5a..2fef74b474f0 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -742,6 +742,12 @@
#define MMUBE1_VBE4 0x00000002
#define MMUBE1_VBE5 0x00000001
+#define TMRN_TMCFG0 16 /* Thread Management Configuration Register 0 */
+#define TMRN_TMCFG0_NPRIBITS 0x003f0000 /* Bits of thread priority */
+#define TMRN_TMCFG0_NPRIBITS_SHIFT 16
+#define TMRN_TMCFG0_NATHRD 0x00003f00 /* Number of active threads */
+#define TMRN_TMCFG0_NATHRD_SHIFT 8
+#define TMRN_TMCFG0_NTHRD 0x0000003f /* Number of threads */
#define TMRN_IMSR0 0x120 /* Initial MSR Register 0 (e6500) */
#define TMRN_IMSR1 0x121 /* Initial MSR Register 1 (e6500) */
#define TMRN_INIA0 0x140 /* Next Instruction Address Register 0 */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 15099c41622e..92dea8df6b26 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1425,27 +1425,45 @@ static void __init prom_instantiate_sml(void)
{
phandle ibmvtpm_node;
ihandle ibmvtpm_inst;
- u32 entry = 0, size = 0;
+ u32 entry = 0, size = 0, succ = 0;
u64 base;
+ __be32 val;
prom_debug("prom_instantiate_sml: start...\n");
- ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
+ ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
if (!PHANDLE_VALID(ibmvtpm_node))
return;
- ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
+ ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
if (!IHANDLE_VALID(ibmvtpm_inst)) {
prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
return;
}
- if (call_prom_ret("call-method", 2, 2, &size,
- ADDR("sml-get-handover-size"),
- ibmvtpm_inst) != 0 || size == 0) {
- prom_printf("SML get handover size failed\n");
- return;
+ if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
+ &val, sizeof(val)) != PROM_ERROR) {
+ if (call_prom_ret("call-method", 2, 2, &succ,
+ ADDR("reformat-sml-to-efi-alignment"),
+ ibmvtpm_inst) != 0 || succ == 0) {
+ prom_printf("Reformat SML to EFI alignment failed\n");
+ return;
+ }
+
+ if (call_prom_ret("call-method", 2, 2, &size,
+ ADDR("sml-get-allocated-size"),
+ ibmvtpm_inst) != 0 || size == 0) {
+ prom_printf("SML get allocated size failed\n");
+ return;
+ }
+ } else {
+ if (call_prom_ret("call-method", 2, 2, &size,
+ ADDR("sml-get-handover-size"),
+ ibmvtpm_inst) != 0 || size == 0) {
+ prom_printf("SML get handover size failed\n");
+ return;
+ }
}
base = alloc_down(size, PAGE_SIZE, 0);
@@ -1454,6 +1472,8 @@ static void __init prom_instantiate_sml(void)
prom_printf("instantiating sml at 0x%x...", base);
+ memset((void *)base, 0, size);
+
if (call_prom_ret("call-method", 4, 2, &entry,
ADDR("sml-handover"),
ibmvtpm_inst, size, base) != 0 || entry == 0) {
@@ -1464,9 +1484,9 @@ static void __init prom_instantiate_sml(void)
reserve_mem(base, size);
- prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
+ prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
&base, sizeof(base));
- prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
+ prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
&size, sizeof(size));
prom_debug("sml base = 0x%x\n", base);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 1f9c0a17f445..10722b1e38b5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -70,7 +70,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
}
/* Lastly try successively smaller sizes from the page allocator */
- while (!hpt && order > PPC_MIN_HPT_ORDER) {
+ /* Only do this if userspace didn't specify a size via ioctl */
+ while (!hpt && order > PPC_MIN_HPT_ORDER && !htab_orderp) {
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
__GFP_NOWARN, order - PAGE_SHIFT);
if (!hpt)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index c1df9bb1e413..97e7f8c853d8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -470,6 +470,8 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
note_hpte_modification(kvm, rev);
unlock_hpte(hpte, 0);
+ if (v & HPTE_V_ABSENT)
+ v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
hpret[0] = v;
hpret[1] = r;
return H_SUCCESS;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b98889e9851d..b1dab8d1d885 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -150,6 +150,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq 11f
+ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
+ beq 15f /* Invoke the H_DOORBELL handler */
cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
beq cr2, 14f /* HMI check */
@@ -174,6 +176,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_HSRR1, r7
b hmi_exception_after_realmode
+15: mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0xe80
+
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -2377,7 +2383,6 @@ machine_check_realmode:
mr r3, r9 /* get vcpu pointer */
bl kvmppc_realmode_machine_check
nop
- cmpdi r3, 0 /* Did we handle MCE ? */
ld r9, HSTATE_KVM_VCPU(r13)
li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
/*
@@ -2390,13 +2395,18 @@ machine_check_realmode:
 * The old code used to return to host for unhandled errors, which
 * caused the guest to hang with soft lockups inside the guest and
 * made it difficult to recover the guest instance.
+ *
+ * If we receive a machine check with MSR(RI=0), deliver it to the
+ * guest as a machine check, causing the guest to crash.
*/
- ld r10, VCPU_PC(r9)
ld r11, VCPU_MSR(r9)
+ andi. r10, r11, MSR_RI /* check for unrecoverable exception */
+ beq 1f /* Deliver a machine check to guest */
+ ld r10, VCPU_PC(r9)
+ cmpdi r3, 0 /* Did we handle MCE ? */
bne 2f /* Continue guest execution. */
/* If not, deliver a machine check. SRR0/1 are already set */
- li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
- ld r11, VCPU_MSR(r9)
+1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
bl kvmppc_msr_interrupt
2: b fast_interrupt_c_return
@@ -2436,14 +2446,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* hypervisor doorbell */
3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
+
+ /*
+ * Clear the doorbell as we will invoke the handler
+ * explicitly in the guest exit path.
+ */
+ lis r6, (PPC_DBELL_SERVER << (63-36))@h
+ PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
- /* if not, clear it and return -1 */
- lis r6, (PPC_DBELL_SERVER << (63-36))@h
- PPC_MSGCLR(6)
+ /* if not, return -1 */
li r3, -1
blr
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index b29ce752c7d6..32fdab57d604 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -237,7 +237,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
struct kvm_book3e_206_tlb_entry *gtlbe)
{
struct vcpu_id_table *idt = vcpu_e500->idt;
- unsigned int pr, tid, ts, pid;
+ unsigned int pr, tid, ts;
+ int pid;
u32 val, eaddr;
unsigned long flags;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index ce7291c79f6c..990db69a1d0b 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -15,6 +15,7 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
+#include <asm/reg_booke.h>
#include "booke.h"
#include "e500.h"
@@ -22,6 +23,7 @@
#define XOP_DCBTLS 166
#define XOP_MSGSND 206
#define XOP_MSGCLR 238
+#define XOP_MFTMR 366
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
@@ -113,6 +115,19 @@ static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
+ int rt)
+{
+ /* Expose one thread per vcpu */
+ if (get_tmrn(inst) == TMRN_TMCFG0) {
+ kvmppc_set_gpr(vcpu, rt,
+ 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
+ return EMULATE_DONE;
+ }
+
+ return EMULATE_FAIL;
+}
+
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -165,6 +180,10 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;
+ case XOP_MFTMR:
+ emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
+ break;
+
case XOP_EHPRIV:
emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
advance);
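kvmppc_e500_emul_mftmr() above reports TMCFG0 as 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT), i.e. one active thread and one thread per vcpu. The short standalone sketch below decodes that value using the new masks; the constants are copied locally from the reg_booke.h hunk for illustration and this is not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Local copies of the TMCFG0 field definitions added above. */
#define TMCFG0_NATHRD		0x00003f00	/* Number of active threads */
#define TMCFG0_NATHRD_SHIFT	8
#define TMCFG0_NTHRD		0x0000003f	/* Number of threads */

int main(void)
{
	/* The value the emulation hands the guest: one thread per vcpu. */
	uint32_t tmcfg0 = 1 | (1 << TMCFG0_NATHRD_SHIFT);

	printf("TMCFG0 = 0x%08x\n", tmcfg0);
	printf("active threads = %u\n",
	       (tmcfg0 & TMCFG0_NATHRD) >> TMCFG0_NATHRD_SHIFT);
	printf("total threads  = %u\n", tmcfg0 & TMCFG0_NTHRD);
	return 0;
}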
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 4d33e199edcc..5e2102c19586 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -406,7 +406,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
unsigned long gfn_start, gfn_end;
- tsize_pages = 1 << (tsize - 2);
+ tsize_pages = 1UL << (tsize - 2);
gfn_start = gfn & ~(tsize_pages - 1);
gfn_end = gfn_start + tsize_pages;
@@ -447,7 +447,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
if (likely(!pfnmap)) {
- tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+ tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
pfn = gfn_to_pfn_memslot(slot, gfn);
if (is_error_noslot_pfn(pfn)) {
if (printk_ratelimit())
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2e51289610e4..6fd2405c7f4a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -559,6 +559,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else
r = num_online_cpus();
break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_USER_MEM_SLOTS;
+ break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9b9a2db06810..3a55f493c7da 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -584,6 +584,7 @@ menuconfig PCI
bool "PCI support"
select HAVE_DMA_ATTRS
select PCI_MSI
+ select IOMMU_SUPPORT
help
Enable PCI support.
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 7f654308817c..efaac2c3bb77 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -644,5 +644,7 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslot
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 34d960353a08..c873e682b67f 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -62,6 +62,8 @@ struct zpci_bar_struct {
u8 size; /* order 2 exponent */
};
+struct s390_domain;
+
/* Private data per function */
struct zpci_dev {
struct pci_dev *pdev;
@@ -118,6 +120,8 @@ struct zpci_dev {
struct dentry *debugfs_dev;
struct dentry *debugfs_perf;
+
+ struct s390_domain *s390_domain; /* s390 IOMMU domain data */
};
static inline bool zdev_enabled(struct zpci_dev *zdev)
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 30b4c179c38c..7a7abf1a5537 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -192,5 +192,8 @@ static inline unsigned long *get_st_pto(unsigned long entry)
/* Prototypes */
int zpci_dma_init_device(struct zpci_dev *);
void zpci_dma_exit_device(struct zpci_dev *);
-
+void dma_free_seg_table(unsigned long);
+unsigned long *dma_alloc_cpu_table(void);
+void dma_cleanup_tables(unsigned long *);
+void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
#endif
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 7365e8a46032..b4a5aa110cec 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -336,28 +336,28 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
-static const intercept_handler_t intercept_funcs[] = {
- [0x00 >> 2] = handle_noop,
- [0x04 >> 2] = handle_instruction,
- [0x08 >> 2] = handle_prog,
- [0x10 >> 2] = handle_noop,
- [0x14 >> 2] = handle_external_interrupt,
- [0x18 >> 2] = handle_noop,
- [0x1C >> 2] = kvm_s390_handle_wait,
- [0x20 >> 2] = handle_validity,
- [0x28 >> 2] = handle_stop,
- [0x38 >> 2] = handle_partial_execution,
-};
-
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
- intercept_handler_t func;
- u8 code = vcpu->arch.sie_block->icptcode;
-
- if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
+ switch (vcpu->arch.sie_block->icptcode) {
+ case 0x00:
+ case 0x10:
+ case 0x18:
+ return handle_noop(vcpu);
+ case 0x04:
+ return handle_instruction(vcpu);
+ case 0x08:
+ return handle_prog(vcpu);
+ case 0x14:
+ return handle_external_interrupt(vcpu);
+ case 0x1c:
+ return kvm_s390_handle_wait(vcpu);
+ case 0x20:
+ return handle_validity(vcpu);
+ case 0x28:
+ return handle_stop(vcpu);
+ case 0x38:
+ return handle_partial_execution(vcpu);
+ default:
return -EOPNOTSUPP;
- func = intercept_funcs[code >> 2];
- if (func)
- return func(vcpu);
- return -EOPNOTSUPP;
+ }
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5c2c169395c3..373e32346d68 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -51,11 +51,9 @@ static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
- if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
- (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
- (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
- return 0;
- return 1;
+ return psw_extint_disabled(vcpu) &&
+ psw_ioint_disabled(vcpu) &&
+ psw_mchk_disabled(vcpu);
}
static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
@@ -71,13 +69,8 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
- preempt_disable();
- if (!(vcpu->arch.sie_block->ckc <
- get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
- preempt_enable();
+ if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
return 0;
- }
- preempt_enable();
return ckc_interrupts_enabled(vcpu);
}
@@ -109,14 +102,10 @@ static inline u8 int_word_to_isc(u32 int_word)
return (int_word & 0x38000000) >> 27;
}
-static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
- return vcpu->kvm->arch.float_int.pending_irqs;
-}
-
-static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.local_int.pending_irqs;
+ return vcpu->kvm->arch.float_int.pending_irqs |
+ vcpu->arch.local_int.pending_irqs;
}
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
@@ -135,8 +124,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
unsigned long active_mask;
- active_mask = pending_local_irqs(vcpu);
- active_mask |= pending_floating_irqs(vcpu);
+ active_mask = pending_irqs(vcpu);
if (!active_mask)
return 0;
@@ -204,7 +192,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
- if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+ if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
return;
else if (psw_ioint_disabled(vcpu))
__set_cpuflag(vcpu, CPUSTAT_IO_INT);
@@ -214,7 +202,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
- if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+ if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
return;
if (psw_extint_disabled(vcpu))
__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
@@ -224,7 +212,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
- if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+ if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
return;
if (psw_mchk_disabled(vcpu))
vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -815,23 +803,21 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
- int rc;
+ if (deliverable_irqs(vcpu))
+ return 1;
- rc = !!deliverable_irqs(vcpu);
-
- if (!rc && kvm_cpu_has_pending_timer(vcpu))
- rc = 1;
+ if (kvm_cpu_has_pending_timer(vcpu))
+ return 1;
/* external call pending and deliverable */
- if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+ if (kvm_s390_ext_call_pending(vcpu) &&
!psw_extint_disabled(vcpu) &&
(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
- rc = 1;
-
- if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
- rc = 1;
+ return 1;
- return rc;
+ if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
+ return 1;
+ return 0;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -846,7 +832,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
vcpu->stat.exit_wait_state++;
/* fast path */
- if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+ if (kvm_arch_vcpu_runnable(vcpu))
return 0;
if (psw_interrupts_disabled(vcpu)) {
@@ -860,9 +846,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
goto no_timer;
}
- preempt_disable();
- now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
- preempt_enable();
+ now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
/* underflow */
@@ -901,9 +885,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
u64 now, sltime;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
- preempt_disable();
- now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
- preempt_enable();
+ now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
/*
@@ -981,39 +963,30 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
irq->u.pgm.code, 0);
- li->irq.pgm = irq->u.pgm;
+ if (irq->u.pgm.code == PGM_PER) {
+ li->irq.pgm.code |= PGM_PER;
+ /* only modify PER related information */
+ li->irq.pgm.per_address = irq->u.pgm.per_address;
+ li->irq.pgm.per_code = irq->u.pgm.per_code;
+ li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
+ li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
+ } else if (!(irq->u.pgm.code & PGM_PER)) {
+ li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
+ irq->u.pgm.code;
+ /* only modify non-PER information */
+ li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
+ li->irq.pgm.mon_code = irq->u.pgm.mon_code;
+ li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
+ li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
+ li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
+ li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
+ } else {
+ li->irq.pgm = irq->u.pgm;
+ }
set_bit(IRQ_PEND_PROG, &li->pending_irqs);
return 0;
}
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
-{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_irq irq;
-
- spin_lock(&li->lock);
- irq.u.pgm.code = code;
- __inject_prog(vcpu, &irq);
- BUG_ON(waitqueue_active(li->wq));
- spin_unlock(&li->lock);
- return 0;
-}
-
-int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
- struct kvm_s390_pgm_info *pgm_info)
-{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_irq irq;
- int rc;
-
- spin_lock(&li->lock);
- irq.u.pgm = *pgm_info;
- rc = __inject_prog(vcpu, &irq);
- BUG_ON(waitqueue_active(li->wq));
- spin_unlock(&li->lock);
- return rc;
-}
-
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1390,12 +1363,9 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
- struct kvm_s390_float_interrupt *fi;
u64 type = READ_ONCE(inti->type);
int rc;
- fi = &kvm->arch.float_int;
-
switch (type) {
case KVM_S390_MCHK:
rc = __inject_float_mchk(kvm, inti);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c6b4063fce29..8fe2f1c722dc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -514,35 +514,20 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
if (gtod_high != 0)
return -EINVAL;
- VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
+ VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
- struct kvm_vcpu *cur_vcpu;
- unsigned int vcpu_idx;
- u64 host_tod, gtod;
- int r;
+ u64 gtod;
if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
return -EFAULT;
- r = store_tod_clock(&host_tod);
- if (r)
- return r;
-
- mutex_lock(&kvm->lock);
- preempt_disable();
- kvm->arch.epoch = gtod - host_tod;
- kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
- cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
- kvm_s390_vcpu_unblock_all(kvm);
- preempt_enable();
- mutex_unlock(&kvm->lock);
- VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
+ kvm_s390_set_tod_clock(kvm, gtod);
+ VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
return 0;
}
@@ -574,26 +559,19 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
if (copy_to_user((void __user *)attr->addr, &gtod_high,
sizeof(gtod_high)))
return -EFAULT;
- VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
+ VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
- u64 host_tod, gtod;
- int r;
+ u64 gtod;
- r = store_tod_clock(&host_tod);
- if (r)
- return r;
-
- preempt_disable();
- gtod = host_tod + kvm->arch.epoch;
- preempt_enable();
+ gtod = kvm_s390_get_tod_clock_fast(kvm);
if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
return -EFAULT;
- VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
+ VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
return 0;
}
@@ -1120,7 +1098,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.sca)
goto out_err;
spin_lock(&kvm_lock);
- sca_offset = (sca_offset + 16) & 0x7f0;
+ sca_offset += 16;
+ if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+ sca_offset = 0;
kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
spin_unlock(&kvm_lock);
@@ -1911,6 +1891,22 @@ retry:
return 0;
}
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ preempt_disable();
+ kvm->arch.epoch = tod - get_tod_clock();
+ kvm_s390_vcpu_block_all(kvm);
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+ kvm_s390_vcpu_unblock_all(kvm);
+ preempt_enable();
+ mutex_unlock(&kvm->lock);
+}
+
/**
* kvm_arch_fault_in_page - fault-in guest page if necessary
* @vcpu: The corresponding virtual cpu
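kvm_s390_set_tod_clock() above stores the guest TOD as an epoch offset from the host TOD, and kvm_s390_get_tod_clock_fast() (added to kvm-s390.h further down) reads it back by adding that epoch to the current host clock. A toy userspace sketch of the arithmetic, with made-up clock values (not kernel code, no locking or preemption concerns):

#include <stdio.h>
#include <stdint.h>

/* Toy host clock standing in for get_tod_clock(); values are made up. */
static uint64_t host_tod = 1000;

static uint64_t epoch;

/* Mirrors kvm_s390_set_tod_clock(): remember the guest/host offset. */
static void set_guest_tod(uint64_t gtod)
{
	epoch = gtod - host_tod;
}

/* Mirrors kvm_s390_get_tod_clock_fast(): host clock plus epoch. */
static uint64_t get_guest_tod(void)
{
	return host_tod + epoch;
}

int main(void)
{
	set_guest_tod(5000);	/* userspace sets the guest TOD to 5000 */
	host_tod += 250;	/* the host clock keeps ticking */
	printf("guest TOD now: %llu\n",
	       (unsigned long long)get_guest_tod());	/* prints 5250 */
	return 0;
}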
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c446aabf60d3..1e70e00d3c5e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -175,6 +175,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
return kvm->arch.user_cpu_state_ctrl != 0;
}
+/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
@@ -185,7 +186,25 @@ int __must_check kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq);
-int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
+ struct kvm_s390_pgm_info *pgm_info)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm = *pgm_info,
+ };
+
+ return kvm_s390_inject_vcpu(vcpu, &irq);
+}
+static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = code,
+ };
+
+ return kvm_s390_inject_vcpu(vcpu, &irq);
+}
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
@@ -212,6 +231,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
@@ -231,9 +251,6 @@ extern unsigned long kvm_s390_fac_list_mask[];
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
-/* implemented in interrupt.c */
-int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
- struct kvm_s390_pgm_info *pgm_info);
static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
@@ -254,6 +271,16 @@ static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
kvm_s390_vcpu_unblock(vcpu);
}
+static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
+{
+ u64 rc;
+
+ preempt_disable();
+ rc = get_tod_clock_fast() + kvm->arch.epoch;
+ preempt_enable();
+ return rc;
+}
+
/**
* kvm_s390_inject_prog_cond - conditionally inject a program check
* @vcpu: virtual cpu
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 4d21dc4d1a84..77191b85ea7a 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -33,11 +33,9 @@
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *cpup;
- s64 hostclk, val;
- int i, rc;
+ int rc;
ar_t ar;
- u64 op2;
+ u64 op2, val;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -49,19 +47,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- if (store_tod_clock(&hostclk)) {
- kvm_s390_set_psw_cc(vcpu, 3);
- return 0;
- }
VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
- val = (val - hostclk) & ~0x3fUL;
-
- mutex_lock(&vcpu->kvm->lock);
- preempt_disable();
- kvm_for_each_vcpu(i, cpup, vcpu->kvm)
- cpup->arch.sie_block->epoch = val;
- preempt_enable();
- mutex_unlock(&vcpu->kvm->lock);
+ kvm_s390_set_tod_clock(vcpu->kvm, val);
kvm_s390_set_psw_cc(vcpu, 0);
return 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37505b8b4093..37d10f74425a 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -24,7 +24,7 @@ static int zpci_refresh_global(struct zpci_dev *zdev)
zdev->iommu_pages * PAGE_SIZE);
}
-static unsigned long *dma_alloc_cpu_table(void)
+unsigned long *dma_alloc_cpu_table(void)
{
unsigned long *table, *entry;
@@ -114,12 +114,12 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
return &pto[px];
}
-static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
- dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
+ dma_addr_t dma_addr, int flags)
{
unsigned long *entry;
- entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ entry = dma_walk_cpu_trans(dma_table, dma_addr);
if (!entry) {
WARN_ON_ONCE(1);
return;
@@ -156,7 +156,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
goto no_refresh;
for (i = 0; i < nr_pages; i++) {
- dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
+ dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
+ flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
@@ -181,7 +182,7 @@ no_refresh:
return rc;
}
-static void dma_free_seg_table(unsigned long entry)
+void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
int sx;
@@ -193,21 +194,18 @@ static void dma_free_seg_table(unsigned long entry)
dma_free_cpu_table(sto);
}
-static void dma_cleanup_tables(struct zpci_dev *zdev)
+void dma_cleanup_tables(unsigned long *table)
{
- unsigned long *table;
int rtx;
- if (!zdev || !zdev->dma_table)
+ if (!table)
return;
- table = zdev->dma_table;
for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
if (reg_entry_isvalid(table[rtx]))
dma_free_seg_table(table[rtx]);
dma_free_cpu_table(table);
- zdev->dma_table = NULL;
}
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
@@ -416,6 +414,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
{
int rc;
+ /*
+ * At this point, if the device is part of an IOMMU domain, this would
+ * be a strong hint towards a bug in the IOMMU API (common) code and/or
+ * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+ */
+ WARN_ON(zdev->s390_domain);
+
spin_lock_init(&zdev->iommu_bitmap_lock);
spin_lock_init(&zdev->dma_table_lock);
@@ -450,8 +455,16 @@ out_clean:
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
+ /*
+ * At this point, if the device is part of an IOMMU domain, this would
+ * be a strong hint towards a bug in the IOMMU API (common) code and/or
+ * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+ */
+ WARN_ON(zdev->s390_domain);
+
zpci_unregister_ioat(zdev, 0);
- dma_cleanup_tables(zdev);
+ dma_cleanup_tables(zdev->dma_table);
+ zdev->dma_table = NULL;
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
zdev->next_bit = 0;
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 01d17046225a..bec481aaca16 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -31,6 +31,9 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
+int __node_distance(int, int);
+#define node_distance(a, b) __node_distance(a, b)
+
#else /* CONFIG_NUMA */
#include <asm-generic/topology.h>
diff --git a/arch/sparc/include/uapi/asm/asi.h b/arch/sparc/include/uapi/asm/asi.h
index aace6f313716..7ad7203deaec 100644
--- a/arch/sparc/include/uapi/asm/asi.h
+++ b/arch/sparc/include/uapi/asm/asi.h
@@ -279,7 +279,7 @@
* Most-Recently-Used, primary,
* implicit
*/
-#define ASI_ST_BLKINIT_MRU_S 0xf2 /* (NG4) init-store, twin load,
+#define ASI_ST_BLKINIT_MRU_S 0xf3 /* (NG4) init-store, twin load,
* Most-Recently-Used, secondary,
* implicit
*/
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5320689c06e9..37686828c3d9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -161,7 +161,7 @@ static inline iopte_t *alloc_npages(struct device *dev,
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
- if (unlikely(entry == DMA_ERROR_CODE))
+ if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
@@ -253,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
@@ -426,7 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -492,7 +492,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
@@ -571,7 +571,7 @@ iommu_map_failed:
iopte_make_dummy(iommu, base + j);
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
- DMA_ERROR_CODE);
+ IOMMU_ERROR_CODE);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -648,7 +648,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
iopte_make_dummy(iommu, base + i);
iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
- DMA_ERROR_CODE);
+ IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 1ae5eb1bb045..59d503866431 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1953,7 +1953,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
npages, NULL, (unsigned long)-1, 0);
- if (unlikely(entry < 0))
+ if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d2fe57dad433..836e8cef47e2 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -159,7 +159,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
- if (unlikely(entry == DMA_ERROR_CODE))
+ if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -187,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
+ iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
@@ -226,7 +226,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
devhandle = pbm->devhandle;
entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
@@ -256,7 +256,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
- if (unlikely(entry == DMA_ERROR_CODE))
+ if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -288,7 +288,7 @@ bad:
return DMA_ERROR_CODE;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
return DMA_ERROR_CODE;
}
@@ -317,7 +317,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
bus_addr &= IO_PAGE_MASK;
entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -376,7 +376,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
@@ -451,7 +451,7 @@ iommu_map_failed:
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
- DMA_ERROR_CODE);
+ IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -496,7 +496,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
entry = ((dma_handle - tbl->table_map_base) >> shift);
dma_4v_iommu_demap(&devhandle, entry, npages);
iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
- DMA_ERROR_CODE);
+ IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 62098a89bbbf..d89e97b374cf 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -436,24 +436,26 @@ extern void sun4v_data_access_exception(struct pt_regs *regs,
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
- int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+ int freg;
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
- int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+ int flag;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
- if (freg & 3) {
- current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
- do_fpother(regs);
- return 0;
- }
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
+ freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+ flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+ if (freg & 3) {
+ current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+ do_fpother(regs);
+ return 0;
+ }
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
@@ -513,6 +515,12 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
case 0x100000: size = 4; break;
default: size = 2; break;
}
+ if (size == 1)
+ freg = (insn >> 25) & 0x1f;
+ else
+ freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+ flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+
for (i = 0; i < size; i++)
data[i] = 0;
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index a063d84336d6..62c2647bd5ce 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -6,24 +6,23 @@
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/linkage.h>
+
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
- .text
- .globl VISenter, VISenterhalf
-
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
/* Nothing special need be done here to handle pre-emption, this
* FPU save/restore mechanism is already preemption safe.
*/
-
+ .text
.align 32
-VISenter:
+ENTRY(VISenter)
ldub [%g6 + TI_FPDEPTH], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
@@ -79,3 +78,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
+ENDPROC(VISenter)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4ac88b757514..3025bd57f7ab 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -93,6 +93,8 @@ static unsigned long cpu_pgsz_mask;
static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;
+u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
+
static int cmp_p64(const void *a, const void *b)
{
const struct linux_prom64_registers *x = a, *y = b;
@@ -1157,6 +1159,48 @@ static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
return NULL;
}
+int __node_distance(int from, int to)
+{
+ if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
+ pr_warn("Returning default NUMA distance value for %d->%d\n",
+ from, to);
+ return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+ }
+ return numa_latency[from][to];
+}
+
+static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ struct node_mem_mask *n = &node_masks[i];
+
+ if ((grp->mask == n->mask) && (grp->match == n->val))
+ break;
+ }
+ return i;
+}
+
+static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
+ int index)
+{
+ u64 arc;
+
+ mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
+ int tnode;
+ u64 target = mdesc_arc_target(md, arc);
+ struct mdesc_mlgroup *m = find_mlgroup(target);
+
+ if (!m)
+ continue;
+ tnode = find_best_numa_node_for_mlgroup(m);
+ if (tnode == MAX_NUMNODES)
+ continue;
+ numa_latency[index][tnode] = m->latency;
+ }
+}
+
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
int index)
{
@@ -1220,9 +1264,16 @@ static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
static int __init numa_parse_mdesc(void)
{
struct mdesc_handle *md = mdesc_grab();
- int i, err, count;
+ int i, j, err, count;
u64 node;
+ /* Some sane defaults for numa latency values */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ for (j = 0; j < MAX_NUMNODES; j++)
+ numa_latency[i][j] = (i == j) ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+ }
+
node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
if (node == MDESC_NODE_NULL) {
mdesc_release(md);
@@ -1245,6 +1296,23 @@ static int __init numa_parse_mdesc(void)
count++;
}
+ count = 0;
+ mdesc_for_each_node_by_name(md, node, "group") {
+ find_numa_latencies_for_group(md, node, count);
+ count++;
+ }
+
+ /* Normalize numa latency matrix according to ACPI SLIT spec. */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ u64 self_latency = numa_latency[i][i];
+
+ for (j = 0; j < MAX_NUMNODES; j++) {
+ numa_latency[i][j] =
+ (numa_latency[i][j] * LOCAL_DISTANCE) /
+ self_latency;
+ }
+ }
+
add_node_ranges();
for (i = 0; i < num_node_masks; i++) {
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 928237a7b9ca..c9faddc61100 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -222,7 +222,7 @@ config I2C_BATTERY_BQ27200
tristate "I2C Battery BQ27200 Support"
select I2C_PUV3
select POWER_SUPPLY
- select BATTERY_BQ27x00
+ select BATTERY_BQ27XXX
config I2C_EEPROM_AT24
tristate "I2C EEPROMs AT24 support"
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 046c7fb1ca43..a210eba2727c 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -33,6 +33,11 @@ enum irq_remap_cap {
IRQ_POSTING_CAP = 0,
};
+struct vcpu_data {
+ u64 pi_desc_addr; /* Physical address of PI Descriptor */
+ u32 vector; /* Guest vector of the interrupt */
+};
+
#ifdef CONFIG_IRQ_REMAP
extern bool irq_remapping_cap(enum irq_remap_cap cap);
@@ -58,11 +63,6 @@ static inline struct irq_domain *arch_get_ir_parent_domain(void)
return x86_vector_domain;
}
-struct vcpu_data {
- u64 pi_desc_addr; /* Physical address of PI Descriptor */
- u32 vector; /* Guest vector of the interrupt */
-};
-
#else /* CONFIG_IRQ_REMAP */
static inline bool irq_remapping_cap(enum irq_remap_cap cap) { return 0; }
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index e16466ec473c..e9cd7befcb76 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -112,6 +112,16 @@ struct x86_emulate_ops {
struct x86_exception *fault);
/*
+ * read_phys: Read bytes of standard (non-emulated/special) memory.
+ * Used for descriptor reading.
+ * @addr: [IN ] Physical address from which to read.
+ * @val: [OUT] Value read from memory.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+ void *val, unsigned int bytes);
+
+ /*
* write_std: Write bytes of standard (non-emulated/special) memory.
* Used for descriptor writing.
* @addr: [IN ] Linear address to which to write.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3a36ee704c30..9265196e877f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
+#include <linux/irqbypass.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
@@ -176,6 +177,8 @@ enum {
*/
#define KVM_APIC_PV_EOI_PENDING 1
+struct kvm_kernel_irq_routing_entry;
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -374,6 +377,7 @@ struct kvm_mtrr {
/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
u64 hv_vapic;
+ s64 runtime_offset;
};
struct kvm_vcpu_arch {
@@ -396,6 +400,7 @@ struct kvm_vcpu_arch {
u64 efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
+ u64 eoi_exit_bitmap[4];
unsigned long apic_attention;
int32_t apic_arb_prio;
int mp_state;
@@ -573,6 +578,9 @@ struct kvm_vcpu_arch {
struct {
bool pv_unhalted;
} pv;
+
+ int pending_ioapic_eoi;
+ int pending_external_vector;
};
struct kvm_lpage_info {
@@ -683,6 +691,9 @@ struct kvm_arch {
u32 bsp_vcpu_id;
u64 disabled_quirks;
+
+ bool irqchip_split;
+ u8 nr_reserved_ioapic_pins;
};
struct kvm_vm_stat {
@@ -819,10 +830,10 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
- int (*vm_has_apicv)(struct kvm *kvm);
+ int (*cpu_uses_apicv)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm *kvm, int isr);
- void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu);
void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
@@ -887,6 +898,20 @@ struct kvm_x86_ops {
gfn_t offset, unsigned long mask);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
+
+ /*
+ * Architecture specific hooks for vCPU blocking due to
+ * HLT instruction.
+ * Returns for .pre_block():
+ * - 0 means continue to block the vCPU.
+ * - 1 means we cannot block the vCPU since some event
+ * happens during this period, such as, 'ON' bit in
+ * posted-interrupts descriptor is set.
+ */
+ int (*pre_block)(struct kvm_vcpu *vcpu);
+ void (*post_block)(struct kvm_vcpu *vcpu);
+ int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
};
struct kvm_arch_async_pf {
@@ -1231,4 +1256,13 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
+bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu);
+
+void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm_lapic_irq *irq);
+
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 448b7ca61aee..aa336ff3e03e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -72,7 +72,7 @@
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EXEC_XSAVES 0x00100000
-
+#define SECONDARY_EXEC_PCOMMIT 0x00200000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
#define PIN_BASED_NMI_EXITING 0x00000008
@@ -416,6 +416,7 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
+#define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index f0412c50c47b..040d4083c24f 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -153,6 +153,12 @@
/* MSR used to provide vcpu index */
#define HV_X64_MSR_VP_INDEX 0x40000002
+/* MSR used to reset the guest OS. */
+#define HV_X64_MSR_RESET 0x40000003
+
+/* MSR used to provide vcpu runtime in 100ns units */
+#define HV_X64_MSR_VP_RUNTIME 0x40000010
+
/* MSR used to read the per-partition time reference counter */
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
@@ -251,4 +257,16 @@ typedef struct _HV_REFERENCE_TSC_PAGE {
__s64 tsc_offset;
} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
#endif
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 37fee272618f..5b15d94a33f8 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -78,6 +78,7 @@
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
+#define EXIT_REASON_PCOMMIT 65
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -126,7 +127,8 @@
{ EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_INVPCID, "INVPCID" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
- { EXIT_REASON_XRSTORS, "XRSTORS" }
+ { EXIT_REASON_XRSTORS, "XRSTORS" }, \
+ { EXIT_REASON_PCOMMIT, "PCOMMIT" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 2c7aafa70702..2bd81e302427 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -32,6 +32,7 @@
static int kvmclock = 1;
static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+static cycle_t kvm_sched_clock_offset;
static int parse_no_kvmclock(char *arg)
{
@@ -92,6 +93,29 @@ static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
return kvm_clock_read();
}
+static cycle_t kvm_sched_clock_read(void)
+{
+ return kvm_clock_read() - kvm_sched_clock_offset;
+}
+
+static inline void kvm_sched_clock_init(bool stable)
+{
+ if (!stable) {
+ pv_time_ops.sched_clock = kvm_clock_read;
+ return;
+ }
+
+ kvm_sched_clock_offset = kvm_clock_read();
+ pv_time_ops.sched_clock = kvm_sched_clock_read;
+ set_sched_clock_stable();
+
+ printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
+ kvm_sched_clock_offset);
+
+ BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
+ sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
+}
+
/*
* If we don't do that, there is the possibility that the guest
* will calibrate under heavy load - thus, getting a lower lpj -
@@ -248,7 +272,17 @@ void __init kvmclock_init(void)
memblock_free(mem, size);
return;
}
- pv_time_ops.sched_clock = kvm_clock_read;
+
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+ pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+
+ cpu = get_cpu();
+ vcpu_time = &hv_clock[cpu].pvti;
+ flags = pvclock_read_flags(vcpu_time);
+
+ kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
+ put_cpu();
+
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
@@ -265,16 +299,6 @@ void __init kvmclock_init(void)
kvm_get_preset_lpj();
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
pv_info.name = "KVM";
-
- if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
- pvclock_set_flags(~0);
-
- cpu = get_cpu();
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
- if (flags & PVCLOCK_COUNTS_FROM_ZERO)
- set_sched_clock_stable();
- put_cpu();
}
int __init kvm_setup_vsyscall_timeinfo(void)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index d8a1d56276e1..639a6e34500c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,8 @@ config KVM
select ANON_INODES
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
+ select IRQ_BYPASS_MANAGER
+ select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_EVENTFD
select KVM_APIC_ARCHITECTURE
diff --git a/arch/x86/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
index d090ecf08809..9dc091acd5fb 100644
--- a/arch/x86/kvm/assigned-dev.c
+++ b/arch/x86/kvm/assigned-dev.c
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include "irq.h"
#include "assigned-dev.h"
+#include "trace/events/kvm.h"
struct kvm_assigned_dev_kernel {
struct kvm_irq_ack_notifier ack_notifier;
@@ -131,7 +132,42 @@ static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef __KVM_HAVE_MSI
+/*
+ * Deliver an IRQ in an atomic context if we can, or return a failure,
+ * user can retry in a process context.
+ * Return value:
+ * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
+ * Other values - No need to retry.
+ */
+static int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level)
+{
+ struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
+ struct kvm_kernel_irq_routing_entry *e;
+ int ret = -EINVAL;
+ int idx;
+
+ trace_kvm_set_irq(irq, level, irq_source_id);
+
+ /*
+ * Injection into either PIC or IOAPIC might need to scan all CPUs,
+ * which would need to be retried from thread context; when same GSI
+ * is connected to both PIC and IOAPIC, we'd have to report a
+ * partial failure here.
+ * Since there's no easy way to do this, we only support injecting MSI
+ * which is limited to 1:1 GSI mapping.
+ */
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
+ e = &entries[0];
+ ret = kvm_arch_set_irq_inatomic(e, kvm, irq_source_id,
+ irq, level);
+ }
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+ return ret;
+}
+
+
static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
{
struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
@@ -150,9 +186,7 @@ static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#endif
-#ifdef __KVM_HAVE_MSIX
static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
{
struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
@@ -183,7 +217,6 @@ static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#endif
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
@@ -386,7 +419,6 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
return 0;
}
-#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev)
{
@@ -408,9 +440,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
return 0;
}
-#endif
-#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev)
{
@@ -443,8 +473,6 @@ err:
return r;
}
-#endif
-
static int assigned_device_enable_guest_intx(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev,
struct kvm_assigned_irq *irq)
@@ -454,7 +482,6 @@ static int assigned_device_enable_guest_intx(struct kvm *kvm,
return 0;
}
-#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev,
struct kvm_assigned_irq *irq)
@@ -463,9 +490,7 @@ static int assigned_device_enable_guest_msi(struct kvm *kvm,
dev->ack_notifier.gsi = -1;
return 0;
}
-#endif
-#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev,
struct kvm_assigned_irq *irq)
@@ -474,7 +499,6 @@ static int assigned_device_enable_guest_msix(struct kvm *kvm,
dev->ack_notifier.gsi = -1;
return 0;
}
-#endif
static int assign_host_irq(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev,
@@ -492,16 +516,12 @@ static int assign_host_irq(struct kvm *kvm,
case KVM_DEV_IRQ_HOST_INTX:
r = assigned_device_enable_host_intx(kvm, dev);
break;
-#ifdef __KVM_HAVE_MSI
case KVM_DEV_IRQ_HOST_MSI:
r = assigned_device_enable_host_msi(kvm, dev);
break;
-#endif
-#ifdef __KVM_HAVE_MSIX
case KVM_DEV_IRQ_HOST_MSIX:
r = assigned_device_enable_host_msix(kvm, dev);
break;
-#endif
default:
r = -EINVAL;
}
@@ -534,16 +554,12 @@ static int assign_guest_irq(struct kvm *kvm,
case KVM_DEV_IRQ_GUEST_INTX:
r = assigned_device_enable_guest_intx(kvm, dev, irq);
break;
-#ifdef __KVM_HAVE_MSI
case KVM_DEV_IRQ_GUEST_MSI:
r = assigned_device_enable_guest_msi(kvm, dev, irq);
break;
-#endif
-#ifdef __KVM_HAVE_MSIX
case KVM_DEV_IRQ_GUEST_MSIX:
r = assigned_device_enable_guest_msix(kvm, dev, irq);
break;
-#endif
default:
r = -EINVAL;
}
@@ -826,7 +842,6 @@ out:
}
-#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
struct kvm_assigned_msix_nr *entry_nr)
{
@@ -906,7 +921,6 @@ msix_entry_out:
return r;
}
-#endif
static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
@@ -1012,7 +1026,6 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
goto out;
break;
}
-#ifdef __KVM_HAVE_MSIX
case KVM_ASSIGN_SET_MSIX_NR: {
struct kvm_assigned_msix_nr entry_nr;
r = -EFAULT;
@@ -1033,7 +1046,6 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
goto out;
break;
}
-#endif
case KVM_ASSIGN_SET_INTX_MASK: {
struct kvm_assigned_pci_dev assigned_dev;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 156441bcaac8..6525e926f566 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -348,7 +348,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
- F(AVX512CD);
+ F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(PCOMMIT);
/* cpuid 0xD.1.eax */
const u32 kvm_supported_word10_x86_features =
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index dd05b9cef6ae..06332cb7e7d1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -133,4 +133,41 @@ static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_MPX));
}
+
+static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
+}
+
+static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ return best && (best->edx & bit(X86_FEATURE_RDTSCP));
+}
+
+/*
+ * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
+ */
+#define BIT_NRIPS 3
+
+static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
+
+ /*
+ * NRIPS is a scattered cpuid feature, so we can't use
+ * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
+ * position 8, not 3).
+ */
+ return best && (best->edx & bit(BIT_NRIPS));
+}
+#undef BIT_NRIPS
+
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9da95b9daf8d..1505587d06e9 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2272,8 +2272,8 @@ static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
#define GET_SMSTATE(type, smbase, offset) \
({ \
type __val; \
- int r = ctxt->ops->read_std(ctxt, smbase + offset, &__val, \
- sizeof(__val), NULL); \
+ int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
+ sizeof(__val)); \
if (r != X86EMUL_CONTINUE) \
return X86EMUL_UNHANDLEABLE; \
__val; \
@@ -2484,17 +2484,36 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
/*
* Get back to real mode, to prepare a safe state in which to load
- * CR0/CR3/CR4/EFER. Also this will ensure that addresses passed
- * to read_std/write_std are not virtual.
- *
- * CR4.PCIDE must be zero, because it is a 64-bit mode only feature.
+ * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
+ * supports long mode.
*/
+ cr4 = ctxt->ops->get_cr(ctxt, 4);
+ if (emulator_has_longmode(ctxt)) {
+ struct desc_struct cs_desc;
+
+ /* Zero CR4.PCIDE before CR0.PG. */
+ if (cr4 & X86_CR4_PCIDE) {
+ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
+ cr4 &= ~X86_CR4_PCIDE;
+ }
+
+ /* A 32-bit code segment is required to clear EFER.LMA. */
+ memset(&cs_desc, 0, sizeof(cs_desc));
+ cs_desc.type = 0xb;
+ cs_desc.s = cs_desc.g = cs_desc.p = 1;
+ ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
+ }
+
+ /* For the 64-bit case, this will clear EFER.LMA. */
cr0 = ctxt->ops->get_cr(ctxt, 0);
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
- cr4 = ctxt->ops->get_cr(ctxt, 4);
+
+ /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
if (cr4 & X86_CR4_PAE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+
+ /* And finally go back to 32-bit mode. */
efer = 0;
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
@@ -4455,7 +4474,7 @@ static const struct opcode twobyte_table[256] = {
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
- II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
+ II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index a8160d2ae362..62cf8c915e95 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -41,6 +41,7 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
case HV_X64_MSR_TIME_REF_COUNT:
case HV_X64_MSR_CRASH_CTL:
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ case HV_X64_MSR_RESET:
r = true;
break;
}
@@ -163,6 +164,12 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
data);
case HV_X64_MSR_CRASH_CTL:
return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
+ case HV_X64_MSR_RESET:
+ if (data == 1) {
+ vcpu_debug(vcpu, "hyper-v reset requested\n");
+ kvm_make_request(KVM_REQ_HV_RESET, vcpu);
+ }
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
@@ -171,7 +178,16 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
return 0;
}
-static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+/* Calculate cpu time spent by current task in 100ns units */
+static u64 current_task_runtime_100ns(void)
+{
+ cputime_t utime, stime;
+
+ task_cputime_adjusted(current, &utime, &stime);
+ return div_u64(cputime_to_nsecs(utime + stime), 100);
+}
+
+static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
@@ -205,6 +221,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+ case HV_X64_MSR_VP_RUNTIME:
+ if (!host)
+ return 1;
+ hv->runtime_offset = data - current_task_runtime_100ns();
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
@@ -241,6 +262,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
pdata);
case HV_X64_MSR_CRASH_CTL:
return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
+ case HV_X64_MSR_RESET:
+ data = 0;
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
@@ -277,6 +301,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case HV_X64_MSR_APIC_ASSIST_PAGE:
data = hv->hv_vapic;
break;
+ case HV_X64_MSR_VP_RUNTIME:
+ data = current_task_runtime_100ns() + hv->runtime_offset;
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
@@ -295,7 +322,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
- return kvm_hv_set_msr(vcpu, msr, data);
+ return kvm_hv_set_msr(vcpu, msr, data, host);
}
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index f90952f64e79..08116ff227cc 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -35,6 +35,7 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
+#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"
@@ -333,7 +334,8 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
s64 interval;
- if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
+ if (!ioapic_in_kernel(kvm) ||
+ ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
return;
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 856f79105bb5..88d0a92d3f94 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -233,21 +233,7 @@ static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
}
-static void update_handled_vectors(struct kvm_ioapic *ioapic)
-{
- DECLARE_BITMAP(handled_vectors, 256);
- int i;
-
- memset(handled_vectors, 0, sizeof(handled_vectors));
- for (i = 0; i < IOAPIC_NUM_PINS; ++i)
- __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
- memcpy(ioapic->handled_vectors, handled_vectors,
- sizeof(handled_vectors));
- smp_wmb();
-}
-
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
- u32 *tmr)
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
union kvm_ioapic_redirect_entry *e;
@@ -260,13 +246,11 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
index == RTC_GSI) {
if (kvm_apic_match_dest(vcpu, NULL, 0,
- e->fields.dest_id, e->fields.dest_mode)) {
+ e->fields.dest_id, e->fields.dest_mode) ||
+ (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
+ kvm_apic_pending_eoi(vcpu, e->fields.vector)))
__set_bit(e->fields.vector,
(unsigned long *)eoi_exit_bitmap);
- if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
- __set_bit(e->fields.vector,
- (unsigned long *)tmr);
- }
}
}
spin_unlock(&ioapic->lock);
@@ -315,7 +299,6 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
e->bits |= (u32) val;
e->fields.remote_irr = 0;
}
- update_handled_vectors(ioapic);
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
@@ -599,7 +582,6 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->id = 0;
memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
rtc_irq_eoi_tracking_reset(ioapic);
- update_handled_vectors(ioapic);
}
static const struct kvm_io_device_ops ioapic_mmio_ops = {
@@ -628,8 +610,10 @@ int kvm_ioapic_init(struct kvm *kvm)
if (ret < 0) {
kvm->arch.vioapic = NULL;
kfree(ioapic);
+ return ret;
}
+ kvm_vcpu_request_scan_ioapic(kvm);
return ret;
}
@@ -666,7 +650,6 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
ioapic->irr = 0;
ioapic->irr_delivered = 0;
- update_handled_vectors(ioapic);
kvm_vcpu_request_scan_ioapic(kvm);
kvm_ioapic_inject_all(ioapic, state->irr);
spin_unlock(&ioapic->lock);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index ca0b0b4e6256..084617d37c74 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -9,6 +9,7 @@ struct kvm;
struct kvm_vcpu;
#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS
+#define MAX_NR_RESERVED_IOAPIC_PINS KVM_MAX_IRQ_ROUTES
#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */
#define IOAPIC_EDGE_TRIG 0
#define IOAPIC_LEVEL_TRIG 1
@@ -73,7 +74,6 @@ struct kvm_ioapic {
struct kvm *kvm;
void (*ack_notifier)(void *opaque, int irq);
spinlock_t lock;
- DECLARE_BITMAP(handled_vectors, 256);
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
u32 irq_eoi[IOAPIC_NUM_PINS];
@@ -98,11 +98,12 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
return kvm->arch.vioapic;
}
-static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+static inline int ioapic_in_kernel(struct kvm *kvm)
{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- return test_bit(vector, ioapic->handled_vectors);
+ int ret;
+
+ ret = (ioapic_irqchip(kvm) != NULL);
+ return ret;
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
@@ -120,7 +121,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, unsigned long *dest_map);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
- u32 *tmr);
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
#endif
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index a1ec6a50a05a..097060e33bd6 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -38,14 +38,27 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
/*
+ * check if there is a pending userspace external interrupt
+ */
+static int pending_userspace_extint(struct kvm_vcpu *v)
+{
+ return v->arch.pending_external_vector != -1;
+}
+
+/*
* check if there is pending interrupt from
* non-APIC source without intack.
*/
static int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
- if (kvm_apic_accept_pic_intr(v))
- return pic_irqchip(v->kvm)->output; /* PIC */
- else
+ u8 accept = kvm_apic_accept_pic_intr(v);
+
+ if (accept) {
+ if (irqchip_split(v->kvm))
+ return pending_userspace_extint(v);
+ else
+ return pic_irqchip(v->kvm)->output;
+ } else
return 0;
}
@@ -57,13 +70,13 @@ static int kvm_cpu_has_extint(struct kvm_vcpu *v)
*/
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
{
- if (!irqchip_in_kernel(v->kvm))
+ if (!lapic_in_kernel(v))
return v->arch.interrupt.pending;
if (kvm_cpu_has_extint(v))
return 1;
- if (kvm_apic_vid_enabled(v->kvm))
+ if (kvm_vcpu_apic_vid_enabled(v))
return 0;
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
@@ -75,7 +88,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
- if (!irqchip_in_kernel(v->kvm))
+ if (!lapic_in_kernel(v))
return v->arch.interrupt.pending;
if (kvm_cpu_has_extint(v))
@@ -91,9 +104,16 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
*/
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
- if (kvm_cpu_has_extint(v))
- return kvm_pic_read_irq(v->kvm); /* PIC */
- return -1;
+ if (kvm_cpu_has_extint(v)) {
+ if (irqchip_split(v->kvm)) {
+ int vector = v->arch.pending_external_vector;
+
+ v->arch.pending_external_vector = -1;
+ return vector;
+ } else
+ return kvm_pic_read_irq(v->kvm); /* PIC */
+ } else
+ return -1;
}
/*
@@ -103,7 +123,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
int vector;
- if (!irqchip_in_kernel(v->kvm))
+ if (!lapic_in_kernel(v))
return v->arch.interrupt.nr;
vector = kvm_cpu_get_extint(v);
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 3d782a2c336a..ae5c78f2337d 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -83,13 +83,38 @@ static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
return kvm->arch.vpic;
}
+static inline int pic_in_kernel(struct kvm *kvm)
+{
+ int ret;
+
+ ret = (pic_irqchip(kvm) != NULL);
+ return ret;
+}
+
+static inline int irqchip_split(struct kvm *kvm)
+{
+ return kvm->arch.irqchip_split;
+}
+
static inline int irqchip_in_kernel(struct kvm *kvm)
{
struct kvm_pic *vpic = pic_irqchip(kvm);
+ bool ret;
+
+ ret = (vpic != NULL);
+ ret |= irqchip_split(kvm);
/* Read vpic before kvm->irq_routing. */
smp_rmb();
- return vpic != NULL;
+ return ret;
+}
+
+static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
+{
+ /* Same as irqchip_in_kernel(vcpu->kvm), but with less
+ * pointer chasing and no unnecessary memory barriers.
+ */
+ return vcpu->arch.apic != NULL;
}
void kvm_pic_reset(struct kvm_kpic_state *s);
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 9efff9e5b58c..84b96d319909 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -91,8 +91,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
return r;
}
-static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm_lapic_irq *irq)
+void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm_lapic_irq *irq)
{
trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
@@ -108,6 +108,7 @@ static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
irq->level = 1;
irq->shorthand = 0;
}
+EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level, bool line_status)
@@ -123,12 +124,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
}
-static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm)
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
{
struct kvm_lapic_irq irq;
int r;
+ if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
+ return -EWOULDBLOCK;
+
kvm_set_msi_irq(e, &irq);
if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
@@ -137,42 +142,6 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
return -EWOULDBLOCK;
}
-/*
- * Deliver an IRQ in an atomic context if we can, or return a failure,
- * user can retry in a process context.
- * Return value:
- * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
- * Other values - No need to retry.
- */
-int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
-{
- struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
- struct kvm_kernel_irq_routing_entry *e;
- int ret = -EINVAL;
- int idx;
-
- trace_kvm_set_irq(irq, level, irq_source_id);
-
- /*
- * Injection into either PIC or IOAPIC might need to scan all CPUs,
- * which would need to be retried from thread context; when same GSI
- * is connected to both PIC and IOAPIC, we'd have to report a
- * partial failure here.
- * Since there's no easy way to do this, we only support injecting MSI
- * which is limited to 1:1 GSI mapping.
- */
- idx = srcu_read_lock(&kvm->irq_srcu);
- if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
- e = &entries[0];
- if (likely(e->type == KVM_IRQ_ROUTING_MSI))
- ret = kvm_set_msi_inatomic(e, kvm);
- else
- ret = -EWOULDBLOCK;
- }
- srcu_read_unlock(&kvm->irq_srcu, idx);
- return ret;
-}
-
int kvm_request_irq_source_id(struct kvm *kvm)
{
unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
@@ -208,7 +177,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
goto unlock;
}
clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
- if (!irqchip_in_kernel(kvm))
+ if (!ioapic_in_kernel(kvm))
goto unlock;
kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
@@ -297,6 +266,33 @@ out:
return r;
}
+bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu)
+{
+ int i, r = 0;
+ struct kvm_vcpu *vcpu;
+
+ if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
+ return true;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
+ continue;
+
+ if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
+ irq->dest_id, irq->dest_mode))
+ continue;
+
+ if (++r == 2)
+ return false;
+
+ *dest_vcpu = vcpu;
+ }
+
+ return r == 1;
+}
+EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
+
#define IOAPIC_ROUTING_ENTRY(irq) \
{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
.u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
@@ -328,3 +324,54 @@ int kvm_setup_default_irq_routing(struct kvm *kvm)
return kvm_set_irq_routing(kvm, default_routing,
ARRAY_SIZE(default_routing), 0);
}
+
+static const struct kvm_irq_routing_entry empty_routing[] = {};
+
+int kvm_setup_empty_irq_routing(struct kvm *kvm)
+{
+ return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
+}
+
+void kvm_arch_irq_routing_update(struct kvm *kvm)
+{
+ if (ioapic_in_kernel(kvm) || !irqchip_in_kernel(kvm))
+ return;
+ kvm_make_scan_ioapic_request(kvm);
+}
+
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_kernel_irq_routing_entry *entry;
+ struct kvm_irq_routing_table *table;
+ u32 i, nr_ioapic_pins;
+ int idx;
+
+ /* kvm->irq_routing must be read after clearing
+ * KVM_SCAN_IOAPIC. */
+ smp_mb();
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+ nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
+ kvm->arch.nr_reserved_ioapic_pins);
+ for (i = 0; i < nr_ioapic_pins; ++i) {
+ hlist_for_each_entry(entry, &table->map[i], link) {
+ u32 dest_id, dest_mode;
+ bool level;
+
+ if (entry->type != KVM_IRQ_ROUTING_MSI)
+ continue;
+ dest_id = (entry->msi.address_lo >> 12) & 0xff;
+ dest_mode = (entry->msi.address_lo >> 2) & 0x1;
+ level = entry->msi.data & MSI_DATA_TRIGGER_LEVEL;
+ if (level && kvm_apic_match_dest(vcpu, NULL, 0,
+ dest_id, dest_mode)) {
+ u32 vector = entry->msi.data & 0xff;
+
+ __set_bit(vector,
+ (unsigned long *) eoi_exit_bitmap);
+ }
+ }
+ }
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8d9013c5e1ee..ecd4ea1d28a8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -209,7 +209,7 @@ out:
if (old)
kfree_rcu(old, rcu);
- kvm_vcpu_request_scan_ioapic(kvm);
+ kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
@@ -348,6 +348,8 @@ void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
struct kvm_lapic *apic = vcpu->arch.apic;
__kvm_apic_update_irr(pir, apic->regs);
+
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
@@ -390,7 +392,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
vcpu = apic->vcpu;
- if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) {
+ if (unlikely(kvm_vcpu_apic_vid_enabled(vcpu))) {
/* try to update RVI */
apic_clear_vector(vec, apic->regs + APIC_IRR);
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -551,15 +553,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
-void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- int i;
-
- for (i = 0; i < 8; i++)
- apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
-}
-
static void apic_update_ppr(struct kvm_lapic *apic)
{
u32 tpr, isrv, ppr, old_ppr;
@@ -764,6 +757,65 @@ out:
return ret;
}
+bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu)
+{
+ struct kvm_apic_map *map;
+ bool ret = false;
+ struct kvm_lapic *dst = NULL;
+
+ if (irq->shorthand)
+ return false;
+
+ rcu_read_lock();
+ map = rcu_dereference(kvm->arch.apic_map);
+
+ if (!map)
+ goto out;
+
+ if (irq->dest_mode == APIC_DEST_PHYSICAL) {
+ if (irq->dest_id == 0xFF)
+ goto out;
+
+ if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
+ goto out;
+
+ dst = map->phys_map[irq->dest_id];
+ if (dst && kvm_apic_present(dst->vcpu))
+ *dest_vcpu = dst->vcpu;
+ else
+ goto out;
+ } else {
+ u16 cid;
+ unsigned long bitmap = 1;
+ int i, r = 0;
+
+ if (!kvm_apic_logical_map_valid(map))
+ goto out;
+
+ apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
+
+ if (cid >= ARRAY_SIZE(map->logical_map))
+ goto out;
+
+ for_each_set_bit(i, &bitmap, 16) {
+ dst = map->logical_map[cid][i];
+ if (++r == 2)
+ goto out;
+ }
+
+ if (dst && kvm_apic_present(dst->vcpu))
+ *dest_vcpu = dst->vcpu;
+ else
+ goto out;
+ }
+
+ ret = true;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
/*
* Add a pending IRQ into lapic.
* Return 1 if successfully added and 0 if discarded.
@@ -781,6 +833,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
case APIC_DM_FIXED:
+ if (unlikely(trig_mode && !level))
+ break;
+
/* FIXME add logic for vcpu on reset */
if (unlikely(!apic_enabled(apic)))
break;
@@ -790,6 +845,13 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
if (dest_map)
__set_bit(vcpu->vcpu_id, dest_map);
+ if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
+ if (trig_mode)
+ apic_set_vector(vector, apic->regs + APIC_TMR);
+ else
+ apic_clear_vector(vector, apic->regs + APIC_TMR);
+ }
+
if (kvm_x86_ops->deliver_posted_interrupt)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
else {
@@ -868,16 +930,32 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
+static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
+{
+ return test_bit(vector, (ulong *)apic->vcpu->arch.eoi_exit_bitmap);
+}
+
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
- if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
- int trigger_mode;
- if (apic_test_vector(vector, apic->regs + APIC_TMR))
- trigger_mode = IOAPIC_LEVEL_TRIG;
- else
- trigger_mode = IOAPIC_EDGE_TRIG;
- kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
+ int trigger_mode;
+
+ /* Eoi the ioapic only if the ioapic doesn't own the vector. */
+ if (!kvm_ioapic_handles_vector(apic, vector))
+ return;
+
+ /* Request a KVM exit to inform the userspace IOAPIC. */
+ if (irqchip_split(apic->vcpu->kvm)) {
+ apic->vcpu->arch.pending_ioapic_eoi = vector;
+ kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
+ return;
}
+
+ if (apic_test_vector(vector, apic->regs + APIC_TMR))
+ trigger_mode = IOAPIC_LEVEL_TRIG;
+ else
+ trigger_mode = IOAPIC_EDGE_TRIG;
+
+ kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
static int apic_set_eoi(struct kvm_lapic *apic)
@@ -1615,7 +1693,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
- apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
+ apic->irr_pending = kvm_vcpu_apic_vid_enabled(vcpu);
apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
apic->highest_isr_cache = -1;
update_divide_count(apic);
@@ -1838,7 +1916,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_rtc_eoi_tracking_restore_one(vcpu);
+ if (ioapic_in_kernel(vcpu->kvm))
+ kvm_rtc_eoi_tracking_restore_one(vcpu);
+
+ vcpu->arch.apic_arb_prio = 0;
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
@@ -1922,7 +2003,7 @@ static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
/* Cache not set: could be safe but we don't bother. */
apic->highest_isr_cache == -1 ||
/* Need EOI to update ioapic. */
- kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
+ kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
/*
* PV EOI was disabled by apic_sync_pv_eoi_from_guest
* so we need not do anything here.
@@ -1978,7 +2059,7 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4;
- if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
+ if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
return 1;
if (reg == APIC_ICR2)
@@ -1995,7 +2076,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
- if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
+ if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
return 1;
if (reg == APIC_DFR || reg == APIC_ICR2) {
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 764037991d26..fde8e35d5850 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -57,7 +57,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
-void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
void __kvm_apic_update_irr(u32 *pir, void *regs);
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
@@ -144,9 +143,9 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
}
-static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
+static inline bool kvm_vcpu_apic_vid_enabled(struct kvm_vcpu *vcpu)
{
- return kvm_x86_ops->vm_has_apicv(kvm);
+ return kvm_x86_ops->cpu_uses_apicv(vcpu);
}
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
@@ -169,4 +168,6 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
void wait_lapic_expire(struct kvm_vcpu *vcpu);
+bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu);
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ff606f507913..7d85bcae3332 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -818,14 +818,11 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm->arch.indirect_shadow_pages--;
}
-static int has_wrprotected_page(struct kvm_vcpu *vcpu,
- gfn_t gfn,
- int level)
+static int __has_wrprotected_page(gfn_t gfn, int level,
+ struct kvm_memory_slot *slot)
{
- struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
if (slot) {
linfo = lpage_info_slot(gfn, slot, level);
return linfo->write_count;
@@ -834,6 +831,14 @@ static int has_wrprotected_page(struct kvm_vcpu *vcpu,
return 1;
}
+static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ return __has_wrprotected_page(gfn, level, slot);
+}
+
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
unsigned long page_size;
@@ -851,6 +856,17 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
return ret;
}
+static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
+ bool no_dirty_log)
+{
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+ return false;
+ if (no_dirty_log && slot->dirty_bitmap)
+ return false;
+
+ return true;
+}
+
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
@@ -858,21 +874,25 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
- (no_dirty_log && slot->dirty_bitmap))
+ if (!memslot_valid_for_gpte(slot, no_dirty_log))
slot = NULL;
return slot;
}
-static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
-{
- return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
-}
-
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
+ bool *force_pt_level)
{
int host_level, level, max_level;
+ struct kvm_memory_slot *slot;
+
+ if (unlikely(*force_pt_level))
+ return PT_PAGE_TABLE_LEVEL;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
+ *force_pt_level = !memslot_valid_for_gpte(slot, true);
+ if (unlikely(*force_pt_level))
+ return PT_PAGE_TABLE_LEVEL;
host_level = host_mapping_level(vcpu->kvm, large_gfn);
@@ -882,7 +902,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
- if (has_wrprotected_page(vcpu, large_gfn, level))
+ if (__has_wrprotected_page(large_gfn, level, slot))
break;
return level - 1;
@@ -2962,14 +2982,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
{
int r;
int level;
- int force_pt_level;
+ bool force_pt_level = false;
pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
- force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+ level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
- level = mapping_level(vcpu, gfn);
/*
* This path builds a PAE pagetable - so we can map
* 2mb pages at maximum. Therefore check if the level
@@ -2979,8 +2998,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
level = PT_DIRECTORY_LEVEL;
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
- } else
- level = PT_PAGE_TABLE_LEVEL;
+ }
if (fast_page_fault(vcpu, v, level, error_code))
return 0;
@@ -3427,7 +3445,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
static bool can_do_async_pf(struct kvm_vcpu *vcpu)
{
- if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
+ if (unlikely(!lapic_in_kernel(vcpu) ||
kvm_event_needs_reinjection(vcpu)))
return false;
@@ -3476,7 +3494,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
pfn_t pfn;
int r;
int level;
- int force_pt_level;
+ bool force_pt_level;
gfn_t gfn = gpa >> PAGE_SHIFT;
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
@@ -3495,20 +3513,15 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (r)
return r;
- if (mapping_level_dirty_bitmap(vcpu, gfn) ||
- !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL))
- force_pt_level = 1;
- else
- force_pt_level = 0;
-
+ force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
+ PT_DIRECTORY_LEVEL);
+ level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
- level = mapping_level(vcpu, gfn);
if (level > PT_DIRECTORY_LEVEL &&
!check_hugepage_cache_consistency(vcpu, gfn, level))
level = PT_DIRECTORY_LEVEL;
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
- } else
- level = PT_PAGE_TABLE_LEVEL;
+ }
if (fast_page_fault(vcpu, gpa, level, error_code))
return 0;
@@ -3706,7 +3719,7 @@ static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
int maxphyaddr, bool execonly)
{
- int pte;
+ u64 bad_mt_xwr;
rsvd_check->rsvd_bits_mask[0][3] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
@@ -3724,14 +3737,16 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
- for (pte = 0; pte < 64; pte++) {
- int rwx_bits = pte & 7;
- int mt = pte >> 3;
- if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||
- rwx_bits == 0x2 || rwx_bits == 0x6 ||
- (rwx_bits == 0x4 && !execonly))
- rsvd_check->bad_mt_xwr |= (1ull << pte);
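+ /*
+ * bad_mt_xwr is indexed by the low six PTE bits: the byte index is
+ * the memory type (PTE bits 5:3) and the bit within that byte is
+ * the XWR combination (PTE bits 2:0).
+ */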
+ bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
+ bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */
+ bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */
+ bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */
+ bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */
+ if (!execonly) {
+ /* bits 0..2 must not be 100 unless VMX capabilities allow it */
+ bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
}
+ rsvd_check->bad_mt_xwr = bad_mt_xwr;
}
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 736e6ab8784d..b41faa91a6f9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -698,7 +698,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
- int force_pt_level;
+ bool force_pt_level = false;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
@@ -743,15 +743,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
&walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
- if (walker.level >= PT_DIRECTORY_LEVEL)
- force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
- || is_self_change_mapping;
- else
- force_pt_level = 1;
- if (!force_pt_level) {
- level = min(walker.level, mapping_level(vcpu, walker.gfn));
- walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
- }
+ if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
+ level = mapping_level(vcpu, walker.gfn, &force_pt_level);
+ if (likely(!force_pt_level)) {
+ level = min(walker.level, level);
+ walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+ }
+ } else
+ force_pt_level = true;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2f9ed1ff0632..f2c8e4917688 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -159,6 +159,9 @@ struct vcpu_svm {
u32 apf_reason;
u64 tsc_ratio;
+
+ /* cached guest cpuid flags for faster access */
+ bool nrips_enabled : 1;
};
static DEFINE_PER_CPU(u64, current_tsc_ratio);
@@ -1086,7 +1089,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - tsc;
}
-static void init_vmcb(struct vcpu_svm *svm, bool init_event)
+static void init_vmcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
@@ -1157,8 +1160,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
- if (!init_event)
- svm_set_efer(&svm->vcpu, 0);
+ svm_set_efer(&svm->vcpu, 0);
save->dr6 = 0xffff0ff0;
kvm_set_rflags(&svm->vcpu, 2);
save->rip = 0x0000fff0;
@@ -1212,7 +1214,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
}
- init_vmcb(svm, init_event);
+ init_vmcb(svm);
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
@@ -1268,7 +1270,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
clear_page(svm->vmcb);
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
- init_vmcb(svm, false);
+ init_vmcb(svm);
svm_init_osvw(&svm->vcpu);
@@ -1890,7 +1892,7 @@ static int shutdown_interception(struct vcpu_svm *svm)
* so reinitialize it.
*/
clear_page(svm->vmcb);
- init_vmcb(svm, false);
+ init_vmcb(svm);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
@@ -2365,7 +2367,9 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
- nested_vmcb->control.next_rip = vmcb->control.next_rip;
+
+ if (svm->nrips_enabled)
+ nested_vmcb->control.next_rip = vmcb->control.next_rip;
/*
* If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
@@ -3060,7 +3064,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
- if (irqchip_in_kernel(svm->vcpu.kvm))
+ if (lapic_in_kernel(&svm->vcpu))
return r;
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
@@ -3294,24 +3298,11 @@ static int msr_interception(struct vcpu_svm *svm)
static int interrupt_window_interception(struct vcpu_svm *svm)
{
- struct kvm_run *kvm_run = svm->vcpu.run;
-
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
++svm->vcpu.stat.irq_window_exits;
- /*
- * If the user space waits to inject interrupts, exit as soon as
- * possible
- */
- if (!irqchip_in_kernel(svm->vcpu.kvm) &&
- kvm_run->request_interrupt_window &&
- !kvm_cpu_has_interrupt(&svm->vcpu)) {
- kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
- return 0;
- }
-
return 1;
}
@@ -3659,12 +3650,12 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
return;
}
-static int svm_vm_has_apicv(struct kvm *kvm)
+static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
{
return 0;
}
-static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
return;
}
@@ -4098,6 +4089,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /* Update nrips enabled cache */
+ svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
}
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -4425,7 +4420,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
- .vm_has_apicv = svm_vm_has_apicv,
+ .cpu_uses_apicv = svm_cpu_uses_apicv,
.load_eoi_exitmap = svm_load_eoi_exitmap,
.sync_pir_to_irr = svm_sync_pir_to_irr,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 4eae7c35ddf5..120302511802 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -129,6 +129,24 @@ TRACE_EVENT(kvm_pio,
);
/*
+ * Tracepoint for fast mmio.
+ */
+TRACE_EVENT(kvm_fast_mmio,
+ TP_PROTO(u64 gpa),
+ TP_ARGS(gpa),
+
+ TP_STRUCT__entry(
+ __field(u64, gpa)
+ ),
+
+ TP_fast_assign(
+ __entry->gpa = gpa;
+ ),
+
+ TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
+);
+
+/*
* Tracepoint for cpuid.
*/
TRACE_EVENT(kvm_cpuid,
@@ -974,6 +992,39 @@ TRACE_EVENT(kvm_enter_smm,
__entry->smbase)
);
+/*
+ * Tracepoint for VT-d posted-interrupts.
+ */
+TRACE_EVENT(kvm_pi_irte_update,
+ TP_PROTO(unsigned int vcpu_id, unsigned int gsi,
+ unsigned int gvec, u64 pi_desc_addr, bool set),
+ TP_ARGS(vcpu_id, gsi, gvec, pi_desc_addr, set),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( unsigned int, gsi )
+ __field( unsigned int, gvec )
+ __field( u64, pi_desc_addr )
+ __field( bool, set )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->gsi = gsi;
+ __entry->gvec = gvec;
+ __entry->pi_desc_addr = pi_desc_addr;
+ __entry->set = set;
+ ),
+
+ TP_printk("VT-d PI is %s for this irq, vcpu %u, gsi: 0x%x, "
+ "gvec: 0x%x, pi_desc_addr: 0x%llx",
+ __entry->set ? "enabled and being updated" : "disabled",
+ __entry->vcpu_id,
+ __entry->gsi,
+ __entry->gvec,
+ __entry->pi_desc_addr)
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6a8bc64566ab..5eb56ed77c1f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -35,6 +35,7 @@
#include "kvm_cache_regs.h"
#include "x86.h"
+#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
@@ -45,6 +46,7 @@
#include <asm/debugreg.h>
#include <asm/kexec.h>
#include <asm/apic.h>
+#include <asm/irq_remapping.h>
#include "trace.h"
#include "pmu.h"
@@ -424,6 +426,9 @@ struct nested_vmx {
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
+ u16 vpid02;
+ u16 last_vpid;
+
u32 nested_vmx_procbased_ctls_low;
u32 nested_vmx_procbased_ctls_high;
u32 nested_vmx_true_procbased_ctls_low;
@@ -440,14 +445,33 @@ struct nested_vmx {
u32 nested_vmx_misc_low;
u32 nested_vmx_misc_high;
u32 nested_vmx_ept_caps;
+ u32 nested_vmx_vpid_caps;
};
#define POSTED_INTR_ON 0
+#define POSTED_INTR_SN 1
+
/* Posted-Interrupt Descriptor */
struct pi_desc {
u32 pir[8]; /* Posted interrupt requested */
- u32 control; /* bit 0 of control is outstanding notification bit */
- u32 rsvd[7];
+ union {
+ struct {
+ /* bit 256 - Outstanding Notification */
+ u16 on : 1,
+ /* bit 257 - Suppress Notification */
+ sn : 1,
+ /* bit 271:258 - Reserved */
+ rsvd_1 : 14;
+ /* bit 279:272 - Notification Vector */
+ u8 nv;
+ /* bit 287:280 - Reserved */
+ u8 rsvd_2;
+ /* bit 319:288 - Notification Destination */
+ u32 ndst;
+ };
+ u64 control;
+ };
+ u32 rsvd[6];
} __aligned(64);
static bool pi_test_and_set_on(struct pi_desc *pi_desc)
@@ -467,6 +491,30 @@ static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+ return clear_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_set_sn(struct pi_desc *pi_desc)
+{
+ return set_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline int pi_test_on(struct pi_desc *pi_desc)
+{
+ return test_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline int pi_test_sn(struct pi_desc *pi_desc)
+{
+ return test_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
@@ -532,8 +580,6 @@ struct vcpu_vmx {
s64 vnmi_blocked_time;
u32 exit_reason;
- bool rdtscp_enabled;
-
/* Posted interrupt descriptor */
struct pi_desc pi_desc;
@@ -563,6 +609,11 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
+static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
+{
+ return &(to_vmx(vcpu)->pi_desc);
+}
+
#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name) [number] = VMCS12_OFFSET(name)
#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
@@ -809,7 +860,7 @@ static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
-static int vmx_vm_has_apicv(struct kvm *kvm);
+static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -831,6 +882,13 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
+/*
+ * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
+ * can find which vCPU should be woken up.
+ */
+static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
+static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
+
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
@@ -946,9 +1004,9 @@ static inline bool cpu_has_vmx_tpr_shadow(void)
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}
-static inline bool vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
{
- return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
+ return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
}
static inline bool cpu_has_secondary_exec_ctrls(void)
@@ -983,7 +1041,8 @@ static inline bool cpu_has_vmx_virtual_intr_delivery(void)
static inline bool cpu_has_vmx_posted_intr(void)
{
- return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+ return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
+ vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_vmx_apicv(void)
@@ -1062,9 +1121,9 @@ static inline bool cpu_has_vmx_ple(void)
SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}
-static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
- return flexpriority_enabled && irqchip_in_kernel(kvm);
+ return flexpriority_enabled && lapic_in_kernel(vcpu);
}
static inline bool cpu_has_vmx_vpid(void)
@@ -1157,6 +1216,11 @@ static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}
+static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
+}
+
static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
@@ -1337,13 +1401,13 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
__loaded_vmcs_clear, loaded_vmcs, 1);
}
-static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
+static inline void vpid_sync_vcpu_single(int vpid)
{
- if (vmx->vpid == 0)
+ if (vpid == 0)
return;
if (cpu_has_vmx_invvpid_single())
- __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+ __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}
static inline void vpid_sync_vcpu_global(void)
@@ -1352,10 +1416,10 @@ static inline void vpid_sync_vcpu_global(void)
__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}
-static inline void vpid_sync_context(struct vcpu_vmx *vmx)
+static inline void vpid_sync_context(int vpid)
{
if (cpu_has_vmx_invvpid_single())
- vpid_sync_vcpu_single(vmx);
+ vpid_sync_vcpu_single(vpid);
else
vpid_sync_vcpu_global();
}
@@ -1895,6 +1959,52 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
preempt_enable();
}
+static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+
+ if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
+ do {
+ old.control = new.control = pi_desc->control;
+
+ /*
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
+ * are two possible cases:
+ * 1. After running 'pre_block', a context switch
+ *    happened.  In this case, 'sn' was set in
+ *    vmx_vcpu_put(), so we need to clear it here.
+ * 2. After running 'pre_block', we were blocked and
+ *    then woken up by another task.  In this case,
+ *    'pi_post_block' does everything for us.
+ * We cannot tell the two cases apart here (and do not
+ * need to), so clearing 'sn' is harmless either way.
+ */
+ if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
+ if (vcpu->cpu != cpu) {
+ dest = cpu_physical_id(cpu);
+
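+ /*
+ * In x2APIC mode, 'NDST' holds the full 32-bit APIC ID;
+ * in xAPIC mode, the 8-bit APIC ID goes in bits 15:8.
+ */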
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+ }
+
+ /* set 'NV' to 'notification vector' */
+ new.nv = POSTED_INTR_VECTOR;
+ }
+
+ /* Allow posting non-urgent interrupts */
+ new.sn = 0;
+ } while (cmpxchg(&pi_desc->control, old.control,
+ new.control) != old.control);
+}
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
@@ -1945,10 +2055,27 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
vmx->loaded_vmcs->cpu = cpu;
}
+
+ vmx_vcpu_pi_load(vcpu, cpu);
+}
+
+static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
+ /* Set SN when the vCPU is preempted */
+ if (vcpu->preempted)
+ pi_set_sn(pi_desc);
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
+ vmx_vcpu_pi_put(vcpu);
+
__vmx_load_host_state(to_vmx(vcpu));
if (!vmm_exclusive) {
__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
@@ -2207,7 +2334,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_TSC_AUX);
- if (index >= 0 && vmx->rdtscp_enabled)
+ if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_STAR is only needed on long mode guests, and only
@@ -2377,7 +2504,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_pinbased_ctls_high |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
PIN_BASED_VMX_PREEMPTION_TIMER;
- if (vmx_vm_has_apicv(vmx->vcpu.kvm))
+ if (vmx_cpu_uses_apicv(&vmx->vcpu))
vmx->nested.nested_vmx_pinbased_ctls_high |=
PIN_BASED_POSTED_INTR;
@@ -2471,10 +2598,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_PCOMMIT;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
@@ -2493,6 +2622,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
+ if (enable_vpid)
+ vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
+ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ else
+ vmx->nested.nested_vmx_vpid_caps = 0;
+
if (enable_unrestricted_guest)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
@@ -2608,7 +2743,8 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
break;
case MSR_IA32_VMX_EPT_VPID_CAP:
/* Currently, no nested vpid support */
- *pdata = vmx->nested.nested_vmx_ept_caps;
+ *pdata = vmx->nested.nested_vmx_ept_caps |
+ ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
break;
default:
return 1;
@@ -2673,7 +2809,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.ia32_xss;
break;
case MSR_TSC_AUX:
- if (!to_vmx(vcpu)->rdtscp_enabled)
+ if (!guest_cpuid_has_rdtscp(vcpu))
return 1;
/* Otherwise falls through */
default:
@@ -2779,7 +2915,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
case MSR_TSC_AUX:
- if (!vmx->rdtscp_enabled)
+ if (!guest_cpuid_has_rdtscp(vcpu))
return 1;
/* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0)
@@ -2874,6 +3010,8 @@ static int hardware_enable(void)
return -EBUSY;
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
/*
* Now we can enable the vmclear operation in kdump
@@ -3015,7 +3153,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_SHADOW_VMCS |
SECONDARY_EXEC_XSAVES |
- SECONDARY_EXEC_ENABLE_PML;
+ SECONDARY_EXEC_ENABLE_PML |
+ SECONDARY_EXEC_PCOMMIT;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -3441,9 +3580,9 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
{
- vpid_sync_context(to_vmx(vcpu));
+ vpid_sync_context(vpid);
if (enable_ept) {
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
@@ -3451,6 +3590,11 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
}
}
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -3644,20 +3788,21 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!is_paging(vcpu)) {
hw_cr4 &= ~X86_CR4_PAE;
hw_cr4 |= X86_CR4_PSE;
- /*
- * SMEP/SMAP is disabled if CPU is in non-paging mode
- * in hardware. However KVM always uses paging mode to
- * emulate guest non-paging mode with TDP.
- * To emulate this behavior, SMEP/SMAP needs to be
- * manually disabled when guest switches to non-paging
- * mode.
- */
- hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
} else if (!(cr4 & X86_CR4_PAE)) {
hw_cr4 &= ~X86_CR4_PAE;
}
}
+ if (!enable_unrestricted_guest && !is_paging(vcpu))
+ /*
+ * SMEP/SMAP is disabled if CPU is in non-paging mode in
+ * hardware. However KVM always uses paging mode without
+ * unrestricted guest.
+ * To emulate this behavior, SMEP/SMAP needs to be manually
+ * disabled when guest switches to non-paging mode.
+ */
+ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+
vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
return 0;
@@ -4146,29 +4291,28 @@ static int alloc_identity_pagetable(struct kvm *kvm)
return r;
}
-static void allocate_vpid(struct vcpu_vmx *vmx)
+static int allocate_vpid(void)
{
int vpid;
- vmx->vpid = 0;
if (!enable_vpid)
- return;
+ return 0;
spin_lock(&vmx_vpid_lock);
vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
- if (vpid < VMX_NR_VPIDS) {
- vmx->vpid = vpid;
+ if (vpid < VMX_NR_VPIDS)
__set_bit(vpid, vmx_vpid_bitmap);
- }
+ else
+ vpid = 0;
spin_unlock(&vmx_vpid_lock);
+ return vpid;
}
-static void free_vpid(struct vcpu_vmx *vmx)
+static void free_vpid(int vpid)
{
- if (!enable_vpid)
+ if (!enable_vpid || vpid == 0)
return;
spin_lock(&vmx_vpid_lock);
- if (vmx->vpid != 0)
- __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ __clear_bit(vpid, vmx_vpid_bitmap);
spin_unlock(&vmx_vpid_lock);
}
@@ -4323,9 +4467,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
msr, MSR_TYPE_W);
}
-static int vmx_vm_has_apicv(struct kvm *kvm)
+static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
{
- return enable_apicv && irqchip_in_kernel(kvm);
+ return enable_apicv && lapic_in_kernel(vcpu);
}
static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4369,6 +4513,22 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SMP
if (vcpu->mode == IN_GUEST_MODE) {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * Currently we don't support urgent interrupts; all
+ * interrupts are treated as non-urgent, so we cannot
+ * post interrupts while 'SN' is set.
+ *
+ * If the vcpu is in guest mode, it is running rather
+ * than scheduled out and waiting in the run queue,
+ * which is currently the only case in which 'SN' can
+ * be set, so warn if 'SN' is set.
+ */
+ WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
+
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
POSTED_INTR_VECTOR);
return true;
@@ -4505,7 +4665,7 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
{
u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
- if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+ if (!vmx_cpu_uses_apicv(&vmx->vcpu))
pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
return pin_based_exec_ctrl;
}
@@ -4517,7 +4677,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
exec_control &= ~CPU_BASED_MOV_DR_EXITING;
- if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+ if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
exec_control |= CPU_BASED_CR8_STORE_EXITING |
@@ -4534,7 +4694,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
- if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+ if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
if (vmx->vpid == 0)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
@@ -4548,7 +4708,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
if (!ple_gap)
exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
- if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+ if (!vmx_cpu_uses_apicv(&vmx->vcpu))
exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
@@ -4558,8 +4718,12 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
a current VMCS12
*/
exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
- /* PML is enabled/disabled in creating/destorying vcpu */
- exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+
+ if (!enable_pml)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+
+ /* Currently we allow the L1 guest to run the pcommit instruction directly. */
+ exec_control &= ~SECONDARY_EXEC_PCOMMIT;
return exec_control;
}
@@ -4604,12 +4768,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
- if (cpu_has_secondary_exec_ctrls()) {
+ if (cpu_has_secondary_exec_ctrls())
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
vmx_secondary_exec_control(vmx));
- }
- if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
+ if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
vmcs_write64(EOI_EXIT_BITMAP0, 0);
vmcs_write64(EOI_EXIT_BITMAP1, 0);
vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4753,7 +4916,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (cpu_has_vmx_tpr_shadow() && !init_event) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
- if (vm_need_tpr_shadow(vcpu->kvm))
+ if (cpu_need_tpr_shadow(vcpu))
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
__pa(vcpu->arch.apic->regs));
vmcs_write32(TPR_THRESHOLD, 0);
@@ -4761,7 +4924,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
- if (vmx_vm_has_apicv(vcpu->kvm))
+ if (vmx_cpu_uses_apicv(vcpu))
memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
if (vmx->vpid != 0)
@@ -4771,12 +4934,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx_set_cr0(vcpu, cr0); /* enter rmode */
vmx->vcpu.arch.cr0 = cr0;
vmx_set_cr4(vcpu, 0);
- if (!init_event)
- vmx_set_efer(vcpu, 0);
+ vmx_set_efer(vcpu, 0);
vmx_fpu_activate(vcpu);
update_exception_bitmap(vcpu);
- vpid_sync_context(vmx);
+ vpid_sync_context(vmx->vpid);
}
/*
@@ -5296,7 +5458,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
u8 cr8 = (u8)val;
err = kvm_set_cr8(vcpu, cr8);
kvm_complete_insn_gp(vcpu, err);
- if (irqchip_in_kernel(vcpu->kvm))
+ if (lapic_in_kernel(vcpu))
return 1;
if (cr8_prev <= cr8)
return 1;
@@ -5510,17 +5672,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
++vcpu->stat.irq_window_exits;
-
- /*
- * If the user space waits to inject interrupts, exit as soon as
- * possible
- */
- if (!irqchip_in_kernel(vcpu->kvm) &&
- vcpu->run->request_interrupt_window &&
- !kvm_cpu_has_interrupt(vcpu)) {
- vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
- return 0;
- }
return 1;
}
@@ -5753,6 +5904,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
skip_emulated_instruction(vcpu);
+ trace_kvm_fast_mmio(gpa);
return 1;
}
@@ -5910,6 +6062,25 @@ static void update_ple_window_actual_max(void)
ple_window_grow, INT_MIN);
}
+/*
+ * Handler for POSTED_INTR_WAKEUP_VECTOR.
+ */
+static void wakeup_handler(void)
+{
+ struct kvm_vcpu *vcpu;
+ int cpu = smp_processor_id();
+
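+ /*
+ * Kick every vCPU blocked on this CPU whose posted-interrupt
+ * descriptor has 'ON' set, i.e. which has at least one pending
+ * posted interrupt.
+ */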
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
+ blocked_vcpu_list) {
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (pi_test_on(pi_desc) == 1)
+ kvm_vcpu_kick(vcpu);
+ }
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+}
+
static __init int hardware_setup(void)
{
int r = -ENOMEM, i, msr;
@@ -6096,6 +6267,8 @@ static __init int hardware_setup(void)
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
+ kvm_set_posted_intr_wakeup_handler(wakeup_handler);
+
return alloc_kvm_area();
out8:
@@ -6627,7 +6800,6 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
{
- u32 exec_control;
if (vmx->nested.current_vmptr == -1ull)
return;
@@ -6640,9 +6812,8 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
they were modified */
copy_shadow_to_vmcs12(vmx);
vmx->nested.sync_shadow_vmcs = false;
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
vmx->nested.posted_intr_nv = -1;
@@ -6662,6 +6833,7 @@ static void free_nested(struct vcpu_vmx *vmx)
return;
vmx->nested.vmxon = false;
+ free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
@@ -7038,7 +7210,6 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
- u32 exec_control;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -7070,9 +7241,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
vmx->nested.current_vmcs12 = new_vmcs12;
vmx->nested.current_vmcs12_page = page;
if (enable_shadow_vmcs) {
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
__pa(vmx->nested.current_shadow_vmcs));
vmx->nested.sync_shadow_vmcs = true;
@@ -7178,7 +7348,63 @@ static int handle_invept(struct kvm_vcpu *vcpu)
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
- kvm_queue_exception(vcpu, UD_VECTOR);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 vmx_instruction_info;
+ unsigned long type, types;
+ gva_t gva;
+ struct x86_exception e;
+ int vpid;
+
+ if (!(vmx->nested.nested_vmx_secondary_ctls_high &
+ SECONDARY_EXEC_ENABLE_VPID) ||
+ !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
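+ /*
+ * 'types' is the bitmask of INVVPID types the nested guest may use,
+ * taken from the advertised VPID capabilities.
+ */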
+ types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
+
+ if (!(types & (1UL << type))) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return 1;
+ }
+
+ /* According to the Intel VMX instruction reference, the memory
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
+ sizeof(u32), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ switch (type) {
+ case VMX_VPID_EXTENT_ALL_CONTEXT:
+ if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return 1;
+ }
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+ nested_vmx_succeed(vcpu);
+ break;
+ default:
+ /* Trap single context invalidation invvpid calls */
+ BUG_ON(1);
+ break;
+ }
+
+ skip_emulated_instruction(vcpu);
return 1;
}
@@ -7207,6 +7433,13 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_pcommit(struct kvm_vcpu *vcpu)
+{
+ /* We never intercept the pcommit instruction for the L1 guest. */
+ WARN_ON(1);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -7257,6 +7490,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_XSAVES] = handle_xsaves,
[EXIT_REASON_XRSTORS] = handle_xrstors,
[EXIT_REASON_PML_FULL] = handle_pml_full,
+ [EXIT_REASON_PCOMMIT] = handle_pcommit,
};
static const int kvm_vmx_max_exit_handlers =
@@ -7558,6 +7792,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* the XSS exit bitmap in vmcs12.
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+ case EXIT_REASON_PCOMMIT:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
default:
return true;
}
@@ -7569,10 +7805,9 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}
-static int vmx_enable_pml(struct vcpu_vmx *vmx)
+static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
{
struct page *pml_pg;
- u32 exec_control;
pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pml_pg)
@@ -7583,24 +7818,15 @@ static int vmx_enable_pml(struct vcpu_vmx *vmx)
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- exec_control |= SECONDARY_EXEC_ENABLE_PML;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
-
return 0;
}
-static void vmx_disable_pml(struct vcpu_vmx *vmx)
+static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
- u32 exec_control;
-
- ASSERT(vmx->pml_pg);
- __free_page(vmx->pml_pg);
- vmx->pml_pg = NULL;
-
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ if (vmx->pml_pg) {
+ __free_page(vmx->pml_pg);
+ vmx->pml_pg = NULL;
+ }
}
static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
@@ -7924,10 +8150,10 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
* apicv
*/
if (!cpu_has_vmx_virtualize_x2apic_mode() ||
- !vmx_vm_has_apicv(vcpu->kvm))
+ !vmx_cpu_uses_apicv(vcpu))
return;
- if (!vm_need_tpr_shadow(vcpu->kvm))
+ if (!cpu_need_tpr_shadow(vcpu))
return;
sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@@ -8029,9 +8255,10 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
}
}
-static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
- if (!vmx_vm_has_apicv(vcpu->kvm))
+ u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap;
+ if (!vmx_cpu_uses_apicv(vcpu))
return;
vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
@@ -8477,8 +8704,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (enable_pml)
- vmx_disable_pml(vmx);
- free_vpid(vmx);
+ vmx_destroy_pml_buffer(vmx);
+ free_vpid(vmx->vpid);
leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
free_nested(vmx);
@@ -8497,7 +8724,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx)
return ERR_PTR(-ENOMEM);
- allocate_vpid(vmx);
+ vmx->vpid = allocate_vpid();
err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
if (err)
@@ -8530,7 +8757,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
put_cpu();
if (err)
goto free_vmcs;
- if (vm_need_virtualize_apic_accesses(kvm)) {
+ if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
err = alloc_apic_access_page(kvm);
if (err)
goto free_vmcs;
@@ -8545,8 +8772,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
- if (nested)
+ if (nested) {
nested_vmx_setup_ctls_msrs(vmx);
+ vmx->nested.vpid02 = allocate_vpid();
+ }
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
@@ -8559,7 +8788,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
* for the guest, etc.
*/
if (enable_pml) {
- err = vmx_enable_pml(vmx);
+ err = vmx_create_pml_buffer(vmx);
if (err)
goto free_vmcs;
}
@@ -8567,13 +8796,14 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
+ free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
- free_vpid(vmx);
+ free_vpid(vmx->vpid);
kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
}
@@ -8648,49 +8878,67 @@ static int vmx_get_lpage_level(void)
return PT_PDPE_LEVEL;
}
+static void vmcs_set_secondary_exec_control(u32 new_ctl)
+{
+ /*
+ * These bits in the secondary execution controls field
+ * are dynamic, the others are mostly based on the hypervisor
+ * architecture and the guest's CPUID. Do not touch the
+ * dynamic bits.
+ */
+ u32 mask =
+ SECONDARY_EXEC_SHADOW_VMCS |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
+ u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ (new_ctl & ~mask) | (cur_ctl & mask));
+}
+
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exec_control;
+ u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
- vmx->rdtscp_enabled = false;
if (vmx_rdtscp_supported()) {
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- if (exec_control & SECONDARY_EXEC_RDTSCP) {
- best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
- vmx->rdtscp_enabled = true;
- else {
- exec_control &= ~SECONDARY_EXEC_RDTSCP;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- exec_control);
- }
+ bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
+ if (!rdtscp_enabled)
+ secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
+
+ if (nested) {
+ if (rdtscp_enabled)
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_RDTSCP;
+ else
+ vmx->nested.nested_vmx_secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDTSCP;
}
- if (nested && !vmx->rdtscp_enabled)
- vmx->nested.nested_vmx_secondary_ctls_high &=
- ~SECONDARY_EXEC_RDTSCP;
}
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
- best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
- guest_cpuid_has_pcid(vcpu)) {
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- exec_control);
- } else {
- if (cpu_has_secondary_exec_ctrls()) {
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- exec_control);
- }
+ (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
+ !guest_cpuid_has_pcid(vcpu))) {
+ secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+
if (best)
best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
+
+ vmcs_set_secondary_exec_control(secondary_exec_ctl);
+
+ if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
+ if (guest_cpuid_has_pcommit(vcpu))
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_PCOMMIT;
+ else
+ vmx->nested.nested_vmx_secondary_ctls_high &=
+ ~SECONDARY_EXEC_PCOMMIT;
+ }
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -9298,13 +9546,13 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx_secondary_exec_control(vmx);
- if (!vmx->rdtscp_enabled)
- exec_control &= ~SECONDARY_EXEC_RDTSCP;
+
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_APIC_REGISTER_VIRT);
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_PCOMMIT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -9323,7 +9571,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write64(APIC_ACCESS_ADDR,
page_to_phys(vmx->nested.apic_access_page));
} else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
- (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
+ cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
kvm_vcpu_reload_apic_access_page(vcpu);
@@ -9433,12 +9681,24 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (enable_vpid) {
/*
- * Trivially support vpid by letting L2s share their parent
- * L1's vpid. TODO: move to a more elaborate solution, giving
- * each L2 its own vpid and exposing the vpid feature to L1.
+ * There is no direct mapping between vpid02 and vpid12:
+ * vpid02 is per-vCPU for L0 and is reused, while the value
+ * of vpid12 is changed with one invvpid during nested
+ * vmentry.  vpid12 is allocated by L1 for L2, so it does
+ * not influence the global bitmap (used for vpid01 and
+ * vpid02 allocation) even if L1 spawns many nested vCPUs.
*/
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
- vmx_flush_tlb(vcpu);
+ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+ if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+ vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+ }
+ } else {
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ vmx_flush_tlb(vcpu);
+ }
+
}
if (nested_cpu_has_ept(vmcs12)) {
@@ -10278,6 +10538,201 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
+/*
+ * This routine does the following things for a vCPU which is about
+ * to block, if VT-d PI is enabled:
+ * - Store the vCPU in the wakeup list, so that when an interrupt
+ *   arrives we can find the right vCPU to wake up.
+ * - Change the posted-interrupt descriptor as follows:
+ *       'NDST' <-- vcpu->pre_pcpu
+ *       'NV'   <-- POSTED_INTR_WAKEUP_VECTOR
+ * - If 'ON' becomes set during this process, meaning at least one
+ *   interrupt has been posted for this vCPU, the vCPU cannot be
+ *   blocked; in that case return 1, otherwise return 0.
+ *
+ */
+static int vmx_pre_block(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ unsigned int dest;
+ struct pi_desc old, new;
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP))
+ return 0;
+
+ vcpu->pre_pcpu = vcpu->cpu;
+ spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
+ vcpu->pre_pcpu), flags);
+ list_add_tail(&vcpu->blocked_vcpu_list,
+ &per_cpu(blocked_vcpu_on_cpu,
+ vcpu->pre_pcpu));
+ spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
+ vcpu->pre_pcpu), flags);
+
+ do {
+ old.control = new.control = pi_desc->control;
+
+ /*
+ * We should not block the vCPU if
+ * an interrupt is posted for it.
+ */
+ if (pi_test_on(pi_desc) == 1) {
+ spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
+ vcpu->pre_pcpu), flags);
+ list_del(&vcpu->blocked_vcpu_list);
+ spin_unlock_irqrestore(
+ &per_cpu(blocked_vcpu_on_cpu_lock,
+ vcpu->pre_pcpu), flags);
+ vcpu->pre_pcpu = -1;
+
+ return 1;
+ }
+
+ WARN((pi_desc->sn == 1),
+ "Warning: SN field of posted-interrupts "
+ "is set before blocking\n");
+
+ /*
+ * Since the vCPU can be preempted during this process,
+ * vcpu->cpu could differ from pre_pcpu.  We therefore set
+ * pre_pcpu as the destination of the wakeup notification
+ * event, so that the wakeup handler can find the right
+ * vCPU to wake up if an interrupt arrives while the vCPU
+ * is blocked.
+ */
+ dest = cpu_physical_id(vcpu->pre_pcpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* set 'NV' to 'wakeup vector' */
+ new.nv = POSTED_INTR_WAKEUP_VECTOR;
+ } while (cmpxchg(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ return 0;
+}
+
+static void vmx_post_block(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+ unsigned long flags;
+
+ if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
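+ /*
+ * Point 'NDST' at the CPU the vCPU now runs on, clear 'SN', restore
+ * 'NV' to the regular notification vector, and then remove the vCPU
+ * from the per-CPU wakeup list.
+ */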
+ do {
+ old.control = new.control = pi_desc->control;
+
+ dest = cpu_physical_id(vcpu->cpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* Allow posting non-urgent interrupts */
+ new.sn = 0;
+
+ /* set 'NV' to 'notification vector' */
+ new.nv = POSTED_INTR_VECTOR;
+ } while (cmpxchg(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ if (vcpu->pre_pcpu != -1) {
+ spin_lock_irqsave(
+ &per_cpu(blocked_vcpu_on_cpu_lock,
+ vcpu->pre_pcpu), flags);
+ list_del(&vcpu->blocked_vcpu_list);
+ spin_unlock_irqrestore(
+ &per_cpu(blocked_vcpu_on_cpu_lock,
+ vcpu->pre_pcpu), flags);
+ vcpu->pre_pcpu = -1;
+ }
+}
+
+/*
+ * vmx_update_pi_irte - set IRTE for Posted-Interrupts
+ *
+ * @kvm: kvm
+ * @host_irq: host irq of the interrupt
+ * @guest_irq: gsi of the interrupt
+ * @set: set or unset PI
+ * returns 0 on success, < 0 on failure
+ */
+static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ struct kvm_kernel_irq_routing_entry *e;
+ struct kvm_irq_routing_table *irq_rt;
+ struct kvm_lapic_irq irq;
+ struct kvm_vcpu *vcpu;
+ struct vcpu_data vcpu_info;
+ int idx, ret = -EINVAL;
+
+ if (!kvm_arch_has_assigned_device(kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP))
+ return 0;
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+ BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
+
+ hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+ if (e->type != KVM_IRQ_ROUTING_MSI)
+ continue;
+ /*
+ * VT-d PI cannot post multicast/broadcast interrupts to a vCPU,
+ * so we keep using interrupt remapping for that kind of
+ * interrupt.
+ *
+ * For lowest-priority interrupts, we only support those with a
+ * single CPU as the destination, e.g. the user configures the
+ * interrupt via /proc/irq or uses irqbalance to make it
+ * single-CPU.
+ *
+ * Full lowest-priority interrupt support will be added later.
+ */
+
+ kvm_set_msi_irq(e, &irq);
+ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu))
+ continue;
+
+ vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
+ vcpu_info.vector = irq.vector;
+
+ trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi,
+ vcpu_info.vector, vcpu_info.pi_desc_addr, set);
+
+ if (set)
+ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+ else {
+ /* suppress notification event before unposting */
+ pi_set_sn(vcpu_to_pi_desc(vcpu));
+ ret = irq_set_vcpu_affinity(host_irq, NULL);
+ pi_clear_sn(vcpu_to_pi_desc(vcpu));
+ }
+
+ if (ret < 0) {
+ printk(KERN_INFO "%s: failed to update PI IRTE\n",
+ __func__);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+ return ret;
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -10347,7 +10802,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.update_cr8_intercept = update_cr8_intercept,
.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
- .vm_has_apicv = vmx_vm_has_apicv,
+ .cpu_uses_apicv = vmx_cpu_uses_apicv,
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
@@ -10394,7 +10849,12 @@ static struct kvm_x86_ops vmx_x86_ops = {
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+ .pre_block = vmx_pre_block,
+ .post_block = vmx_post_block,
+
.pmu_ops = &intel_pmu_ops,
+
+ .update_pi_irte = vmx_update_pi_irte,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bda65690788e..4a6eff166fc6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -51,6 +51,8 @@
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#define CREATE_TRACE_POINTS
@@ -64,6 +66,7 @@
#include <asm/fpu/internal.h> /* Ugh! */
#include <asm/pvclock.h>
#include <asm/div64.h>
+#include <asm/irq_remapping.h>
#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
@@ -622,7 +625,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu);
- if ((cr0 ^ old_cr0) & X86_CR0_CD)
+ if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
+ kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
+ !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
return 0;
@@ -789,7 +794,7 @@ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS)
return 1;
- if (irqchip_in_kernel(vcpu->kvm))
+ if (lapic_in_kernel(vcpu))
kvm_lapic_set_tpr(vcpu, cr8);
else
vcpu->arch.cr8 = cr8;
@@ -799,7 +804,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
- if (irqchip_in_kernel(vcpu->kvm))
+ if (lapic_in_kernel(vcpu))
return kvm_lapic_get_cr8(vcpu);
else
return vcpu->arch.cr8;
@@ -953,6 +958,9 @@ static u32 emulated_msrs[] = {
HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
+ HV_X64_MSR_RESET,
+ HV_X64_MSR_VP_INDEX,
+ HV_X64_MSR_VP_RUNTIME,
HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_KVM_PV_EOI_EN,
@@ -1898,6 +1906,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
static void record_steal_time(struct kvm_vcpu *vcpu)
{
+ accumulate_steal_time(vcpu);
+
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -2048,12 +2058,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & KVM_MSR_ENABLED))
break;
- vcpu->arch.st.last_steal = current->sched_info.run_delay;
-
- preempt_disable();
- accumulate_steal_time(vcpu);
- preempt_enable();
-
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
@@ -2449,6 +2453,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_DISABLE_QUIRKS:
case KVM_CAP_SET_BOOT_CPU_ID:
+ case KVM_CAP_SPLIT_IRQCHIP:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
@@ -2628,7 +2633,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
}
- accumulate_steal_time(vcpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
@@ -2662,12 +2666,24 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
{
if (irq->irq >= KVM_NR_INTERRUPTS)
return -EINVAL;
- if (irqchip_in_kernel(vcpu->kvm))
+
+ if (!irqchip_in_kernel(vcpu->kvm)) {
+ kvm_queue_interrupt(vcpu, irq->irq, false);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ return 0;
+ }
+
+ /*
+ * With in-kernel LAPIC, we only use this to inject EXTINT, so
+ * fail for in-kernel 8259.
+ */
+ if (pic_in_kernel(vcpu->kvm))
return -ENXIO;
- kvm_queue_interrupt(vcpu, irq->irq, false);
- kvm_make_request(KVM_REQ_EVENT, vcpu);
+ if (vcpu->arch.pending_external_vector != -1)
+ return -EEXIST;
+ vcpu->arch.pending_external_vector = irq->irq;
return 0;
}
@@ -3176,7 +3192,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_vapic_addr va;
r = -EINVAL;
- if (!irqchip_in_kernel(vcpu->kvm))
+ if (!lapic_in_kernel(vcpu))
goto out;
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
@@ -3425,41 +3441,35 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
- int r = 0;
-
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- return r;
+ return 0;
}
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
- int r = 0;
-
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- return r;
+ return 0;
}
static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
- int r = 0;
-
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
memset(&ps->reserved, 0, sizeof(ps->reserved));
- return r;
+ return 0;
}
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
- int r = 0, start = 0;
+ int start = 0;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3471,7 +3481,7 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
kvm->arch.vpit->pit_state.flags = ps->flags;
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- return r;
+ return 0;
}
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
@@ -3556,6 +3566,28 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm->arch.disabled_quirks = cap->args[0];
r = 0;
break;
+ case KVM_CAP_SPLIT_IRQCHIP: {
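+ /*
+ * Split irqchip: the local APIC is emulated in the kernel while
+ * the IOAPIC and PIC stay in userspace; args[0] is the number of
+ * IOAPIC pins reserved for userspace routing.
+ */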
+ mutex_lock(&kvm->lock);
+ r = -EINVAL;
+ if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
+ goto split_irqchip_unlock;
+ r = -EEXIST;
+ if (irqchip_in_kernel(kvm))
+ goto split_irqchip_unlock;
+ if (atomic_read(&kvm->online_vcpus))
+ goto split_irqchip_unlock;
+ r = kvm_setup_empty_irq_routing(kvm);
+ if (r)
+ goto split_irqchip_unlock;
+ /* Pairs with irqchip_in_kernel. */
+ smp_wmb();
+ kvm->arch.irqchip_split = true;
+ kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+ r = 0;
+split_irqchip_unlock:
+ mutex_unlock(&kvm->lock);
+ break;
+ }
default:
r = -EINVAL;
break;
@@ -3669,7 +3701,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
r = -ENXIO;
- if (!irqchip_in_kernel(kvm))
+ if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
@@ -3693,7 +3725,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
r = -ENXIO;
- if (!irqchip_in_kernel(kvm))
+ if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
if (r)
@@ -4060,6 +4092,15 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}
+static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val, unsigned int bytes)
+{
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
+
+ return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+}
+
int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val,
unsigned int bytes,
@@ -4795,6 +4836,7 @@ static const struct x86_emulate_ops emulate_ops = {
.write_gpr = emulator_write_gpr,
.read_std = kvm_read_guest_virt_system,
.write_std = kvm_write_guest_virt_system,
+ .read_phys = kvm_read_guest_phys_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
@@ -5667,7 +5709,7 @@ void kvm_arch_exit(void)
int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
- if (irqchip_in_kernel(vcpu->kvm)) {
+ if (lapic_in_kernel(vcpu)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
return 1;
} else {
@@ -5774,9 +5816,15 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
*/
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
- return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
- vcpu->run->request_interrupt_window &&
- kvm_arch_interrupt_allowed(vcpu));
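+ /*
+ * Only exit to userspace for interrupt injection when userspace
+ * emulates the PIC; with a split irqchip the LAPIC must also be
+ * willing to accept an ExtINT interrupt.
+ */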
+ if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
+ return false;
+
+ if (kvm_cpu_has_interrupt(vcpu))
+ return false;
+
+ return (irqchip_split(vcpu->kvm)
+ ? kvm_apic_accept_pic_intr(vcpu)
+ : kvm_arch_interrupt_allowed(vcpu));
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5787,13 +5835,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
- if (irqchip_in_kernel(vcpu->kvm))
- kvm_run->ready_for_interrupt_injection = 1;
- else
+ if (!irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection =
kvm_arch_interrupt_allowed(vcpu) &&
!kvm_cpu_has_interrupt(vcpu) &&
!kvm_event_needs_reinjection(vcpu);
+ else if (!pic_in_kernel(vcpu->kvm))
+ kvm_run->ready_for_interrupt_injection =
+ kvm_apic_accept_pic_intr(vcpu) &&
+ !kvm_cpu_has_interrupt(vcpu);
+ else
+ kvm_run->ready_for_interrupt_injection = 1;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6144,18 +6196,18 @@ static void process_smi(struct kvm_vcpu *vcpu)
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
- u64 eoi_exit_bitmap[4];
- u32 tmr[8];
-
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
return;
- memset(eoi_exit_bitmap, 0, 32);
- memset(tmr, 0, 32);
+ memset(vcpu->arch.eoi_exit_bitmap, 0, 256 / 8);
- kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
- kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
- kvm_apic_update_tmr(vcpu, tmr);
+ if (irqchip_split(vcpu->kvm))
+ kvm_scan_ioapic_routes(vcpu, vcpu->arch.eoi_exit_bitmap);
+ else {
+ kvm_x86_ops->sync_pir_to_irr(vcpu);
+ kvm_ioapic_scan_entry(vcpu, vcpu->arch.eoi_exit_bitmap);
+ }
+ kvm_x86_ops->load_eoi_exitmap(vcpu);
}
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
@@ -6168,7 +6220,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
struct page *page = NULL;
- if (!irqchip_in_kernel(vcpu->kvm))
+ if (!lapic_in_kernel(vcpu))
return;
if (!kvm_x86_ops->set_apic_access_page_addr)
@@ -6206,7 +6258,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
- bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+ bool req_int_win = !lapic_in_kernel(vcpu) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = false;
@@ -6258,6 +6310,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_pmu_handle_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_pmu_deliver_pmi(vcpu);
+ if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
+ BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
+ if (test_bit(vcpu->arch.pending_ioapic_eoi,
+ (void *) vcpu->arch.eoi_exit_bitmap)) {
+ vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
+ vcpu->run->eoi.vector =
+ vcpu->arch.pending_ioapic_eoi;
+ r = 0;
+ goto out;
+ }
+ }
if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
vcpu_scan_ioapic(vcpu);
if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
@@ -6268,6 +6331,26 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 0;
goto out;
}
+ if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
+ r = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * KVM_REQ_EVENT is not set when posted interrupts are set by
+ * VT-d hardware, so we have to update RVI unconditionally.
+ */
+ if (kvm_lapic_enabled(vcpu)) {
+ /*
+ * Update architecture specific hints for APIC
+ * virtual interrupt delivery.
+ */
+ if (kvm_x86_ops->hwapic_irr_update)
+ kvm_x86_ops->hwapic_irr_update(vcpu,
+ kvm_lapic_find_highest_irr(vcpu));
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -6286,13 +6369,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
- /*
- * Update architecture specific hints for APIC
- * virtual interrupt delivery.
- */
- if (kvm_x86_ops->hwapic_irr_update)
- kvm_x86_ops->hwapic_irr_update(vcpu,
- kvm_lapic_find_highest_irr(vcpu));
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
@@ -6428,10 +6504,15 @@ out:
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
- if (!kvm_arch_vcpu_runnable(vcpu)) {
+ if (!kvm_arch_vcpu_runnable(vcpu) &&
+ (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ if (kvm_x86_ops->post_block)
+ kvm_x86_ops->post_block(vcpu);
+
if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
return 1;
}
@@ -6468,10 +6549,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
for (;;) {
- if (kvm_vcpu_running(vcpu))
+ if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
- else
+ } else {
r = vcpu_block(kvm, vcpu);
+ }
+
if (r <= 0)
break;
@@ -6480,8 +6563,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
kvm_inject_pending_timer_irqs(vcpu);
if (dm_request_for_irq_injection(vcpu)) {
- r = -EINTR;
- vcpu->run->exit_reason = KVM_EXIT_INTR;
+ r = 0;
+ vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
++vcpu->stat.request_irq_exits;
break;
}
@@ -6608,7 +6691,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
/* re-sync apic's tpr */
- if (!irqchip_in_kernel(vcpu->kvm)) {
+ if (!lapic_in_kernel(vcpu)) {
if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
r = -EINVAL;
goto out;
@@ -7308,7 +7391,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
- return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+ return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu);
}
struct static_key kvm_no_apic_vcpu __read_mostly;
@@ -7377,6 +7460,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
+ vcpu->arch.pending_external_vector = -1;
+
return 0;
fail_free_mce_banks:
@@ -7402,7 +7487,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
kvm_mmu_destroy(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_page((unsigned long)vcpu->arch.pio_data);
- if (!irqchip_in_kernel(vcpu->kvm))
+ if (!lapic_in_kernel(vcpu))
static_key_slow_dec(&kvm_no_apic_vcpu);
}
@@ -8029,7 +8114,59 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ if (kvm_x86_ops->update_pi_irte) {
+ irqfd->producer = prod;
+ return kvm_x86_ops->update_pi_irte(irqfd->kvm,
+ prod->irq, irqfd->gsi, 1);
+ }
+
+ return -EINVAL;
+}
+
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ int ret;
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ if (!kvm_x86_ops->update_pi_irte) {
+ WARN_ON(irqfd->producer != NULL);
+ return;
+ }
+
+ WARN_ON(irqfd->producer != prod);
+ irqfd->producer = NULL;
+
+ /*
+ * When the producer of a consumer is unregistered, we change back to
+ * remapped mode, so we can re-use the current implementation
+ * when the irq is masked/disabled or the consumer side (KVM
+ * in this case) doesn't want to receive the interrupts.
+ */
+ ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+ if (ret)
+ printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
+ " fails: %d\n", irqfd->consumer.token, ret);
+}
+
+int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ if (!kvm_x86_ops->update_pi_irte)
+ return -EINVAL;
+
+ return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
+}
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
@@ -8044,3 +8181,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
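The KVM_CAP_SPLIT_IRQCHIP path added above is driven from userspace through the existing KVM_ENABLE_CAP vm ioctl; a minimal sketch of how a VMM might request it, assuming a VM fd obtained via KVM_CREATE_VM and an illustrative pin count of 24 (anything up to MAX_NR_RESERVED_IOAPIC_PINS is accepted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable split irqchip; must run before any vCPU is created, since the
 * handler above rejects the cap once online_vcpus is non-zero. */
static int enable_split_irqchip(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_SPLIT_IRQCHIP;
	cap.args[0] = 24;	/* IOAPIC pins reserved for userspace routing */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}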
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 55512dd62633..5bcdfc10c23a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -899,6 +899,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
struct cftype blkcg_files[] = {
{
.name = "stat",
+ .flags = CFTYPE_NOT_ON_ROOT,
.seq_show = blkcg_print_stat,
},
{ } /* terminate */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c75a2636dd40..2149a1ddbacf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -369,7 +369,7 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
* regardless of the position of the group in the hierarchy.
*/
sq->parent_sq = &td->service_queue;
- if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
+ if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
tg->td = td;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 04de88463a98..1f9093e901da 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1581,7 +1581,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
static void cfq_cpd_init(struct blkcg_policy_data *cpd)
{
struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
- unsigned int weight = cgroup_on_dfl(blkcg_root.css.cgroup) ?
+ unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
if (cpd_to_blkcg(cpd) == &blkcg_root)
@@ -1599,7 +1599,7 @@ static void cfq_cpd_free(struct blkcg_policy_data *cpd)
static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
{
struct blkcg *blkcg = cpd_to_blkcg(cpd);
- bool on_dfl = cgroup_on_dfl(blkcg_root.css.cgroup);
+ bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
if (blkcg == &blkcg_root)
diff --git a/certs/.gitignore b/certs/.gitignore
new file mode 100644
index 000000000000..f51aea4a71ec
--- /dev/null
+++ b/certs/.gitignore
@@ -0,0 +1,4 @@
+#
+# Generated files
+#
+x509_certificate_list
diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h
index 3f5b537ab33e..1d450b580245 100644
--- a/crypto/asymmetric_keys/asymmetric_keys.h
+++ b/crypto/asymmetric_keys/asymmetric_keys.h
@@ -14,8 +14,3 @@ extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
extern int __asymmetric_key_hex_to_key_id(const char *id,
struct asymmetric_key_id *match_id,
size_t hexlen);
-static inline
-const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
-{
- return key->type_data.p[1];
-}
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 1916680ad81b..9f2165b27d52 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -307,25 +307,34 @@ static int asymmetric_key_preparse(struct key_preparsed_payload *prep)
}
/*
+ * Clean up the key ID list
+ */
+static void asymmetric_key_free_kids(struct asymmetric_key_ids *kids)
+{
+ int i;
+
+ if (kids) {
+ for (i = 0; i < ARRAY_SIZE(kids->id); i++)
+ kfree(kids->id[i]);
+ kfree(kids);
+ }
+}
+
+/*
* Clean up the preparse data
*/
static void asymmetric_key_free_preparse(struct key_preparsed_payload *prep)
{
- struct asymmetric_key_subtype *subtype = prep->type_data[0];
- struct asymmetric_key_ids *kids = prep->type_data[1];
- int i;
+ struct asymmetric_key_subtype *subtype = prep->payload.data[asym_subtype];
+ struct asymmetric_key_ids *kids = prep->payload.data[asym_key_ids];
pr_devel("==>%s()\n", __func__);
if (subtype) {
- subtype->destroy(prep->payload[0]);
+ subtype->destroy(prep->payload.data[asym_crypto]);
module_put(subtype->owner);
}
- if (kids) {
- for (i = 0; i < ARRAY_SIZE(kids->id); i++)
- kfree(kids->id[i]);
- kfree(kids);
- }
+ asymmetric_key_free_kids(kids);
kfree(prep->description);
}
@@ -335,20 +344,19 @@ static void asymmetric_key_free_preparse(struct key_preparsed_payload *prep)
static void asymmetric_key_destroy(struct key *key)
{
struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
- struct asymmetric_key_ids *kids = key->type_data.p[1];
+ struct asymmetric_key_ids *kids = key->payload.data[asym_key_ids];
+ void *data = key->payload.data[asym_crypto];
+
+ key->payload.data[asym_crypto] = NULL;
+ key->payload.data[asym_subtype] = NULL;
+ key->payload.data[asym_key_ids] = NULL;
if (subtype) {
- subtype->destroy(key->payload.data);
+ subtype->destroy(data);
module_put(subtype->owner);
- key->type_data.p[0] = NULL;
}
- if (kids) {
- kfree(kids->id[0]);
- kfree(kids->id[1]);
- kfree(kids);
- key->type_data.p[1] = NULL;
- }
+ asymmetric_key_free_kids(kids);
}
struct key_type key_type_asymmetric = {
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 81efccbe22d5..6db4c01c6503 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL_GPL(pkey_id_type_name);
static void public_key_describe(const struct key *asymmetric_key,
struct seq_file *m)
{
- struct public_key *key = asymmetric_key->payload.data;
+ struct public_key *key = asymmetric_key->payload.data[asym_crypto];
if (key)
seq_printf(m, "%s.%s",
@@ -112,7 +112,7 @@ EXPORT_SYMBOL_GPL(public_key_verify_signature);
static int public_key_verify_signature_2(const struct key *key,
const struct public_key_signature *sig)
{
- const struct public_key *pk = key->payload.data;
+ const struct public_key *pk = key->payload.data[asym_crypto];
return public_key_verify_signature(pk, sig);
}
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 7525fd183574..9441240f7d2a 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -37,7 +37,7 @@ int verify_signature(const struct key *key,
return -EINVAL;
subtype = asymmetric_key_subtype(key);
if (!subtype ||
- !key->payload.data)
+ !key->payload.data[0])
return -EINVAL;
if (!subtype->verify_signature)
return -ENOTSUPP;
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index 1de01eaec884..dbeed6018e63 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -11,6 +11,7 @@
#include <linux/time.h>
#include <crypto/public_key.h>
+#include <keys/asymmetric-type.h>
struct x509_certificate {
struct x509_certificate *next;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 68c3c40501ab..2a44b3752471 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -267,7 +267,8 @@ static int x509_validate_trust(struct x509_certificate *cert,
if (!IS_ERR(key)) {
if (!use_builtin_keys
|| test_bit(KEY_FLAG_BUILTIN, &key->flags))
- ret = x509_check_signature(key->payload.data, cert);
+ ret = x509_check_signature(key->payload.data[asym_crypto],
+ cert);
key_put(key);
}
return ret;
@@ -353,9 +354,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
/* We're pinning the module by being linked against it */
__module_get(public_key_subtype.owner);
- prep->type_data[0] = &public_key_subtype;
- prep->type_data[1] = kids;
- prep->payload[0] = cert->pub;
+ prep->payload.data[asym_subtype] = &public_key_subtype;
+ prep->payload.data[asym_key_ids] = kids;
+ prep->payload.data[asym_crypto] = cert->pub;
prep->description = desc;
prep->quotalen = 100;
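With the old type_data/payload slots collapsed into the indexed payload.data[] array above, consumers reach the crypto material through the asym_* indexes; a small sketch of the access pattern, mirroring public_key_describe() above (the helper name is illustrative, not from this diff):

#include <linux/key.h>
#include <keys/asymmetric-type.h>
#include <crypto/public_key.h>

/* Illustrative helper: fetch the public key data from an asymmetric key.
 * The crypto payload now lives in payload.data[asym_crypto]; the subtype
 * and key IDs sit in asym_subtype and asym_key_ids respectively. */
static const struct public_key *example_key_to_public_key(const struct key *key)
{
	return key->payload.data[asym_crypto];
}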
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 15e40ee62a94..6aaa3f81755b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -175,6 +175,15 @@ config AHCI_XGENE
help
This option enables support for APM X-Gene SoC SATA host controller.
+config AHCI_QORIQ
+ tristate "Freescale QorIQ AHCI SATA support"
+ depends on OF
+ help
+ This option enables support for the onboard AHCI SATA controller
+ on Freescale QorIQ SoCs.
+
+ If unsure, say N.
+
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index af70919f7dde..af45effac18c 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_QORIQ) += ahci_qoriq.o libahci.o libahci_platform.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a46660204e3a..ff02bb4218fc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -314,6 +314,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -489,6 +499,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a2), /* 88se91a2 */
+ .driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 5b8e8a0fab48..45586c1dbbdc 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -181,6 +181,8 @@ enum {
PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_ESP = (1 << 21), /* External Sata Port */
+ PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 1befb114c384..04975b851c23 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -76,7 +76,6 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "ibm,476gtr-ahci", },
{ .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
- { .compatible = "fsl,qoriq-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
new file mode 100644
index 000000000000..d0f9de96e4ea
--- /dev/null
+++ b/drivers/ata/ahci_qoriq.c
@@ -0,0 +1,279 @@
+/*
+ * Freescale QorIQ AHCI SATA platform driver
+ *
+ * Copyright 2015 Freescale, Inc.
+ * Tang Yuantian <Yuantian.Tang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/ahci_platform.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci-qoriq"
+
+/* port register definition */
+#define PORT_PHY1 0xA8
+#define PORT_PHY2 0xAC
+#define PORT_PHY3 0xB0
+#define PORT_PHY4 0xB4
+#define PORT_PHY5 0xB8
+#define PORT_TRANS 0xC8
+
+/* port register default value */
+#define AHCI_PORT_PHY_1_CFG 0xa003fffe
+#define AHCI_PORT_PHY_2_CFG 0x28183411
+#define AHCI_PORT_PHY_3_CFG 0x0e081004
+#define AHCI_PORT_PHY_4_CFG 0x00480811
+#define AHCI_PORT_PHY_5_CFG 0x192c96a4
+#define AHCI_PORT_TRANS_CFG 0x08000025
+
+#define SATA_ECC_DISABLE 0x00020000
+
+enum ahci_qoriq_type {
+ AHCI_LS1021A,
+ AHCI_LS1043A,
+ AHCI_LS2080A,
+};
+
+struct ahci_qoriq_priv {
+ struct ccsr_ahci *reg_base;
+ enum ahci_qoriq_type type;
+ void __iomem *ecc_addr;
+};
+
+static const struct of_device_id ahci_qoriq_of_match[] = {
+ { .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
+ { .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
+ { .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
+
+static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u32 px_cmd, px_is, px_val;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_qoriq_priv *qoriq_priv = hpriv->plat_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+ bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A);
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ /*
+ * There is an erratum on ls1021a Rev1.0 and Rev2.0 which is:
+ * A-009042: The device detection initialization sequence
+ * mistakenly resets some registers.
+ *
+ * Workaround for this is:
+ * The software should read and store PxCMD and PxIS values
+ * before issuing the device detection initialization sequence.
+ * After the sequence is complete, software should restore the
+ * PxCMD and PxIS with the stored values.
+ */
+ if (ls1021a_workaround) {
+ px_cmd = readl(port_mmio + PORT_CMD);
+ px_is = readl(port_mmio + PORT_IRQ_STAT);
+ }
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ /* restore the PxCMD and PxIS on ls1021 */
+ if (ls1021a_workaround) {
+ px_val = readl(port_mmio + PORT_CMD);
+ if (px_val != px_cmd)
+ writel(px_cmd, port_mmio + PORT_CMD);
+
+ px_val = readl(port_mmio + PORT_IRQ_STAT);
+ if (px_val != px_is)
+ writel(px_is, port_mmio + PORT_IRQ_STAT);
+ }
+
+ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+static struct ata_port_operations ahci_qoriq_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_qoriq_hardreset,
+};
+
+static struct ata_port_info ahci_qoriq_port_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_qoriq_ops,
+};
+
+static struct scsi_host_template ahci_qoriq_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
+{
+ struct ahci_qoriq_priv *qpriv = hpriv->plat_data;
+ void __iomem *reg_base = hpriv->mmio;
+
+ switch (qpriv->type) {
+ case AHCI_LS1021A:
+ writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY_2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY_3_CFG, reg_base + PORT_PHY3);
+ writel(AHCI_PORT_PHY_4_CFG, reg_base + PORT_PHY4);
+ writel(AHCI_PORT_PHY_5_CFG, reg_base + PORT_PHY5);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ break;
+
+ case AHCI_LS1043A:
+ case AHCI_LS2080A:
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ahci_qoriq_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct ahci_qoriq_priv *qoriq_priv;
+ const struct of_device_id *of_id;
+ struct resource *res;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ of_id = of_match_node(ahci_qoriq_of_match, np);
+ if (!of_id)
+ return -ENODEV;
+
+ qoriq_priv = devm_kzalloc(dev, sizeof(*qoriq_priv), GFP_KERNEL);
+ if (!qoriq_priv)
+ return -ENOMEM;
+
+ qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+
+ if (qoriq_priv->type == AHCI_LS1021A) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "sata-ecc");
+ qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qoriq_priv->ecc_addr))
+ return PTR_ERR(qoriq_priv->ecc_addr);
+ }
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ hpriv->plat_data = qoriq_priv;
+ rc = ahci_qoriq_phy_init(hpriv);
+ if (rc)
+ goto disable_resources;
+
+ /* Workaround for ls2080a */
+ if (qoriq_priv->type == AHCI_LS2080A) {
+ hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+ ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
+ }
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
+ &ahci_qoriq_sht);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_qoriq_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_qoriq_phy_init(hpriv);
+ if (rc)
+ goto disable_resources;
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
+ ahci_qoriq_resume);
+
+static struct platform_driver ahci_qoriq_driver = {
+ .probe = ahci_qoriq_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ahci_qoriq_of_match,
+ .pm = &ahci_qoriq_pm_ops,
+ },
+};
+module_platform_driver(ahci_qoriq_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ AHCI SATA platform driver");
+MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d256a66158be..096064cd6c52 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1117,6 +1117,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
const char *emsg = NULL;
int rc;
u32 tmp;
@@ -1138,6 +1139,12 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << port_no, mmio + HOST_IRQ_STAT);
+
+ /* mark esata ports */
+ tmp = readl(port_mmio + PORT_CMD);
+ if ((tmp & PORT_CMD_HPCP) ||
+ ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+ ap->pflags |= ATA_PFLAG_EXTERNAL;
}
void ahci_init_controller(struct ata_host *host)
@@ -2486,28 +2493,13 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host, int irq,
rc = devm_request_threaded_irq(host->dev, irq + i,
ahci_multi_irqs_intr,
- ahci_port_thread_fn, IRQF_SHARED,
+ ahci_port_thread_fn, 0,
pp->irq_desc, host->ports[i]);
if (rc)
- goto out_free_irqs;
- }
-
- for (i = 0; i < host->n_ports; i++)
+ return rc;
ata_port_desc(host->ports[i], "irq %d", irq + i);
-
- rc = ata_host_register(host, sht);
- if (rc)
- goto out_free_all_irqs;
-
- return 0;
-
-out_free_all_irqs:
- i = host->n_ports;
-out_free_irqs:
- for (i--; i >= 0; i--)
- devm_free_irq(host->dev, irq + i, host->ports[i]);
-
- return rc;
+ }
+ return ata_host_register(host, sht);
}
/**
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0d7f0da3a269..8b3a7861fa44 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1757,6 +1757,15 @@ nothing_to_do:
return 1;
}
+static void ata_qc_done(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ void (*done)(struct scsi_cmnd *) = qc->scsidone;
+
+ ata_qc_free(qc);
+ done(cmd);
+}
+
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -1774,28 +1783,17 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- ((cdb[2] & 0x20) || need_sense)) {
+ ((cdb[2] & 0x20) || need_sense))
ata_gen_passthru_sense(qc);
- } else {
- if (!need_sense) {
- cmd->result = SAM_STAT_GOOD;
- } else {
- /* TODO: decide which descriptor format to use
- * for 48b LBA devices and call that here
- * instead of the fixed desc, which is only
- * good for smaller LBA (and maybe CHS?)
- * devices.
- */
- ata_gen_ata_sense(qc);
- }
- }
+ else if (need_sense)
+ ata_gen_ata_sense(qc);
+ else
+ cmd->result = SAM_STAT_GOOD;
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap->print_id, &qc->result_tf);
- qc->scsidone(cmd);
-
- ata_qc_free(qc);
+ ata_qc_done(qc);
}
/**
@@ -2015,8 +2013,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
VPRINTK("ENTER\n");
- /* set scsi removable (RMB) bit per ata bit */
- if (ata_id_removable(args->id))
+ /* set scsi removable (RMB) bit per ata bit, or if the
+ * AHCI port says it's external (Hotplug-capable, eSATA).
+ */
+ if (ata_id_removable(args->id) ||
+ (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
@@ -2594,8 +2595,7 @@ static void atapi_sense_complete(struct ata_queued_cmd *qc)
ata_gen_passthru_sense(qc);
}
- qc->scsidone(qc->scsicmd);
- ata_qc_free(qc);
+ ata_qc_done(qc);
}
/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2690,8 +2690,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
- qc->scsidone(cmd);
- ata_qc_free(qc);
+ ata_qc_done(qc);
return;
}
@@ -2735,8 +2734,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
cmd->result = SAM_STAT_GOOD;
}
- qc->scsidone(cmd);
- ata_qc_free(qc);
+ ata_qc_done(qc);
}
/**
* atapi_xlat - Initialize PACKET taskfile
@@ -2914,12 +2912,14 @@ ata_scsi_map_proto(u8 byte1)
case 5: /* PIO Data-out */
return ATA_PROT_PIO;
+ case 12: /* FPDMA */
+ return ATA_PROT_NCQ;
+
case 0: /* Hard Reset */
case 1: /* SRST */
case 8: /* Device Diagnostic */
case 9: /* Device Reset */
case 7: /* DMA Queued */
- case 12: /* FPDMA */
case 15: /* Return Response Info */
default: /* Reserved */
break;
@@ -2947,6 +2947,9 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
goto invalid_fld;
+ /* enable LBA */
+ tf->flags |= ATA_TFLAG_LBA;
+
/*
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
@@ -2992,6 +2995,10 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->command = cdb[9];
}
+ /* For NCQ commands with FPDMA protocol, copy the tag value */
+ if (tf->protocol == ATA_PROT_NCQ)
+ tf->nsect = qc->tag << 3;
+
/* enforce correct master/slave bit */
tf->device = dev->devno ?
tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index a5088ecb349f..7a21edf89e72 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -604,9 +604,9 @@ static void it821x_display_disk(int n, u8 *buf)
{
unsigned char id[41];
int mode = 0;
- char *mtype = "";
+ const char *mtype = "";
char mbuf[8];
- char *cbl = "(40 wire cable)";
+ const char *cbl = "(40 wire cable)";
static const char *types[5] = {
"RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK"
@@ -903,7 +903,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { NULL, NULL };
- static char *mode[2] = { "pass through", "smart" };
+ static const char *mode[2] = { "pass through", "smart" };
int rc;
rc = pcim_enable_device(pdev);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index b0028588ff1c..e3d4b059fcd1 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1344,6 +1344,7 @@ static struct of_device_id pata_macio_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, pata_macio_match);
static struct macio_driver pata_macio_driver =
{
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index c36b3e6531d8..f6c46e9a4dc0 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -24,79 +24,36 @@
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <scsi/scsi_host.h>
-#include <mach/pxa2xx-regs.h>
#include <linux/platform_data/ata-pxa.h>
-#include <mach/dma.h>
#define DRV_NAME "pata_pxa"
#define DRV_VERSION "0.1"
struct pata_pxa_data {
- uint32_t dma_channel;
- struct pxa_dma_desc *dma_desc;
- dma_addr_t dma_desc_addr;
- uint32_t dma_desc_id;
-
- /* DMA IO physical address */
- uint32_t dma_io_addr;
- /* PXA DREQ<0:2> pin selector */
- uint32_t dma_dreq;
- /* DMA DCSR register value */
- uint32_t dma_dcsr;
-
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
struct completion dma_done;
};
/*
- * Setup the DMA descriptors. The size is transfer capped at 4k per descriptor,
- * if the transfer is longer, it is split into multiple chained descriptors.
+ * DMA interrupt handler.
*/
-static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
+static void pxa_ata_dma_irq(void *d)
{
- struct pata_pxa_data *pd = qc->ap->private_data;
-
- uint32_t cpu_len, seg_len;
- dma_addr_t cpu_addr;
-
- cpu_addr = sg_dma_address(sg);
- cpu_len = sg_dma_len(sg);
-
- do {
- seg_len = (cpu_len > 0x1000) ? 0x1000 : cpu_len;
-
- pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr +
- ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc));
-
- pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 |
- DCMD_WIDTH2 | (DCMD_LENGTH & seg_len);
-
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr;
- pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr;
- pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR |
- DCMD_FLOWTRG;
- } else {
- pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr;
- pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr;
- pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR |
- DCMD_FLOWSRC;
- }
-
- cpu_len -= seg_len;
- cpu_addr += seg_len;
- pd->dma_desc_id++;
+ struct pata_pxa_data *pd = d;
+ enum dma_status status;
- } while (cpu_len);
-
- /* Should not happen */
- if (seg_len & 0x1f)
- DALGN |= (1 << pd->dma_dreq);
+ status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
+ if (status == DMA_ERROR || status == DMA_COMPLETE)
+ complete(&pd->dma_done);
}
/*
@@ -105,28 +62,22 @@ static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
static void pxa_qc_prep(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
- int si = 0;
- struct scatterlist *sg;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_transfer_direction dir;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return;
- pd->dma_desc_id = 0;
-
- DCSR(pd->dma_channel) = 0;
- DALGN &= ~(1 << pd->dma_dreq);
-
- for_each_sg(qc->sg, sg, qc->n_elem, si)
- pxa_load_dmac(sg, qc);
-
- pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP;
-
- /* Fire IRQ only at the end of last block */
- pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN;
-
- DDADR(pd->dma_channel) = pd->dma_desc_addr;
- DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel;
-
+ dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
+ tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
+ return;
+ }
+ tx->callback = pxa_ata_dma_irq;
+ tx->callback_param = pd;
+ pd->dma_cookie = dmaengine_submit(tx);
}
/*
@@ -145,7 +96,7 @@ static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
init_completion(&pd->dma_done);
- DCSR(pd->dma_channel) = DCSR_RUN;
+ dma_async_issue_pending(pd->dma_chan);
}
/*
@@ -154,12 +105,14 @@ static void pxa_bmdma_start(struct ata_queued_cmd *qc)
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
+ enum dma_status status;
- if ((DCSR(pd->dma_channel) & DCSR_RUN) &&
- wait_for_completion_timeout(&pd->dma_done, HZ))
- dev_err(qc->ap->dev, "Timeout waiting for DMA completion!");
+ status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
+ if (status != DMA_ERROR && status != DMA_COMPLETE &&
+ wait_for_completion_timeout(&pd->dma_done, HZ))
+ ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");
- DCSR(pd->dma_channel) = 0;
+ dmaengine_terminate_all(pd->dma_chan);
}
/*
@@ -170,8 +123,11 @@ static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
struct pata_pxa_data *pd = ap->private_data;
unsigned char ret = ATA_DMA_INTR;
+ struct dma_tx_state state;
+ enum dma_status status;
- if (pd->dma_dcsr & DCSR_BUSERR)
+ status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
+ if (status != DMA_COMPLETE)
ret |= ATA_DMA_ERR;
return ret;
@@ -213,21 +169,6 @@ static struct ata_port_operations pxa_ata_port_ops = {
.qc_prep = pxa_qc_prep,
};
-/*
- * DMA interrupt handler.
- */
-static void pxa_ata_dma_irq(int dma, void *port)
-{
- struct ata_port *ap = port;
- struct pata_pxa_data *pd = ap->private_data;
-
- pd->dma_dcsr = DCSR(dma);
- DCSR(dma) = pd->dma_dcsr;
-
- if (pd->dma_dcsr & DCSR_STOPSTATE)
- complete(&pd->dma_done);
-}
-
static int pxa_ata_probe(struct platform_device *pdev)
{
struct ata_host *host;
@@ -238,6 +179,9 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *dma_res;
struct resource *irq_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct dma_slave_config config;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
int ret = 0;
/*
@@ -333,29 +277,32 @@ static int pxa_ata_probe(struct platform_device *pdev)
return -ENOMEM;
ap->private_data = data;
- data->dma_dreq = pdata->dma_dreq;
- data->dma_io_addr = dma_res->start;
- /*
- * Allocate space for the DMA descriptors
- */
- data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
- &data->dma_desc_addr, GFP_KERNEL);
- if (!data->dma_desc)
- return -EINVAL;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+ param.drcmr = pdata->dma_dreq;
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.src_addr = dma_res->start;
+ config.dst_addr = dma_res->start;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
/*
* Request the DMA channel
*/
- data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW,
- pxa_ata_dma_irq, ap);
- if (data->dma_channel < 0)
+ data->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &pdev->dev, "data");
+ if (!data->dma_chan)
return -EBUSY;
-
- /*
- * Stop and clear the DMA channel
- */
- DCSR(data->dma_channel) = 0;
+ ret = dmaengine_slave_config(data->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
+ return ret;
+ }
/*
* Activate the ATA host
@@ -363,7 +310,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
pdata->irq_flags, &pxa_ata_sht);
if (ret)
- pxa_free_dma(data->dma_channel);
+ dma_release_channel(data->dma_chan);
return ret;
}
@@ -373,7 +320,7 @@ static int pxa_ata_remove(struct platform_device *pdev)
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_pxa_data *data = host->ports[0]->private_data;
- pxa_free_dma(data->dma_channel);
+ dma_release_channel(data->dma_chan);
ata_host_detach(host);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index cbb5a471eb9d..f6facd686f94 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -70,7 +70,7 @@ struct s3c_ide_info {
struct clk *clk;
void __iomem *ide_addr;
void __iomem *sfr_addr;
- unsigned int irq;
+ int irq;
enum s3c_cpu_type cpu_type;
unsigned int fifo_status_reg;
};
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index a7dfdf9f15ba..80e298870388 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1353,6 +1353,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
return ret;
}
+EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
@@ -1400,6 +1401,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
/* Default device callbacks for generic PM domains. */
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
index 09cb727864f0..19c007461d1c 100644
--- a/drivers/char/tpm/st33zp24/Kconfig
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -1,6 +1,6 @@
config TCG_TIS_ST33ZP24
tristate "STMicroelectronics TPM Interface Specification 1.2 Interface"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
---help---
STMicroelectronics ST33ZP24 core driver. It implements the core
TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index ad1ee180e0c2..309d2767c6a1 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -258,7 +258,6 @@ static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend,
static struct i2c_driver st33zp24_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = TPM_ST33_I2C,
.pm = &st33zp24_i2c_ops,
.of_match_table = of_match_ptr(of_st33zp24_i2c_match),
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index f0184a1b0c1c..f974c945c97a 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -381,7 +381,6 @@ static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
static struct spi_driver tpm_st33_spi_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = TPM_ST33_SPI,
.pm = &st33zp24_spi_ops,
.of_match_table = of_match_ptr(of_st33zp24_spi_match),
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1082d4bb016a..f26b0ae23bea 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -119,6 +119,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
chip->dev.class = tpm_class;
chip->dev.release = tpm_dev_release;
chip->dev.parent = chip->pdev;
+#ifdef CONFIG_ACPI
+ chip->dev.groups = chip->groups;
+#endif
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
@@ -182,12 +185,6 @@ static int tpm1_chip_register(struct tpm_chip *chip)
if (rc)
return rc;
- rc = tpm_add_ppi(chip);
- if (rc) {
- tpm_sysfs_del_device(chip);
- return rc;
- }
-
chip->bios_dir = tpm_bios_log_setup(chip->devname);
return 0;
@@ -201,8 +198,6 @@ static void tpm1_chip_unregister(struct tpm_chip *chip)
if (chip->bios_dir)
tpm_bios_log_teardown(chip->bios_dir);
- tpm_remove_ppi(chip);
-
tpm_sysfs_del_device(chip);
}
@@ -225,10 +220,20 @@ int tpm_chip_register(struct tpm_chip *chip)
if (rc)
return rc;
+ tpm_add_ppi(chip);
+
rc = tpm_dev_add_device(chip);
if (rc)
goto out_err;
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj,
+ &chip->dev.kobj,
+ "ppi");
+ if (rc)
+ goto out_err;
+ }
+
/* Make the chip available. */
spin_lock(&driver_lock);
list_add_rcu(&chip->list, &tpm_chip_list);
@@ -263,6 +268,9 @@ void tpm_chip_unregister(struct tpm_chip *chip)
spin_unlock(&driver_lock);
synchronize_rcu();
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ sysfs_remove_link(&chip->pdev->kobj, "ppi");
+
tpm1_chip_unregister(chip);
tpm_dev_del_device(chip);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index e85d3416d899..c50637db3a8a 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -666,6 +666,30 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
}
/**
+ * tpm_is_tpm2 - is the chip a TPM2 chip?
+ * @chip_num: tpm idx # or ANY
+ *
+ * Returns < 0 on error, and 1 or 0 on success depending on whether the
+ * chip is a TPM2 chip.
+ */
+int tpm_is_tpm2(u32 chip_num)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
+
+ tpm_chip_put(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_is_tpm2);
+
+/**
* tpm_pcr_read - read a pcr value
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
@@ -1021,6 +1045,58 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
}
EXPORT_SYMBOL_GPL(tpm_get_random);
+/**
+ * tpm_seal_trusted() - seal a trusted key
+ * @chip_num: A specific chip number for the request or TPM_ANY_NUM
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
+ * are supported.
+ */
+int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ rc = tpm2_seal_trusted(chip, payload, options);
+
+ tpm_chip_put(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_seal_trusted);
+
+/**
+ * tpm_unseal_trusted() - unseal a trusted key
+ * @chip_num: A specific chip number for the request or TPM_ANY_NUM
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
+ * are supported.
+ */
+int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ rc = tpm2_unseal_trusted(chip, payload, options);
+
+ tpm_chip_put(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
+
static int __init tpm_init(void)
{
int rc;
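The two new exports are aimed at the trusted-keys subsystem; a hedged sketch of a caller, assuming the payload and options structures have already been filled in by that code (the helper name here is illustrative):

#include <linux/errno.h>
#include <linux/tpm.h>
#include <keys/trusted-type.h>

/* Illustrative caller: seal key material with any available TPM 2.0 chip. */
static int example_tpm2_seal(struct trusted_key_payload *payload,
			     struct trusted_key_options *options)
{
	int rc;

	rc = tpm_is_tpm2(TPM_ANY_NUM);
	if (rc < 0)
		return rc;		/* no chip registered */
	if (!rc)
		return -ENODEV;		/* seal/unseal is TPM 2.0 only */

	return tpm_seal_trusted(TPM_ANY_NUM, payload, options);
}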
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f8319a0860fd..a4257a32964f 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2015 Intel Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
@@ -28,6 +29,7 @@
#include <linux/tpm.h>
#include <linux/acpi.h>
#include <linux/cdev.h>
+#include <linux/highmem.h>
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
@@ -88,6 +90,9 @@ enum tpm2_return_codes {
enum tpm2_algorithms {
TPM2_ALG_SHA1 = 0x0004,
+ TPM2_ALG_KEYEDHASH = 0x0008,
+ TPM2_ALG_SHA256 = 0x000B,
+ TPM2_ALG_NULL = 0x0010
};
enum tpm2_command_codes {
@@ -95,6 +100,10 @@ enum tpm2_command_codes {
TPM2_CC_SELF_TEST = 0x0143,
TPM2_CC_STARTUP = 0x0144,
TPM2_CC_SHUTDOWN = 0x0145,
+ TPM2_CC_CREATE = 0x0153,
+ TPM2_CC_LOAD = 0x0157,
+ TPM2_CC_UNSEAL = 0x015E,
+ TPM2_CC_FLUSH_CONTEXT = 0x0165,
TPM2_CC_GET_CAPABILITY = 0x017A,
TPM2_CC_GET_RANDOM = 0x017B,
TPM2_CC_PCR_READ = 0x017E,
@@ -115,6 +124,13 @@ enum tpm2_startup_types {
TPM2_SU_STATE = 0x0001,
};
+enum tpm2_start_method {
+ TPM2_START_ACPI = 2,
+ TPM2_START_FIFO = 6,
+ TPM2_START_CRB = 7,
+ TPM2_START_CRB_WITH_ACPI = 8,
+};
+
struct tpm_chip;
struct tpm_vendor_specific {
@@ -151,8 +167,7 @@ struct tpm_vendor_specific {
enum tpm_chip_flags {
TPM_CHIP_FLAG_REGISTERED = BIT(0),
- TPM_CHIP_FLAG_PPI = BIT(1),
- TPM_CHIP_FLAG_TPM2 = BIT(2),
+ TPM_CHIP_FLAG_TPM2 = BIT(1),
};
struct tpm_chip {
@@ -175,6 +190,8 @@ struct tpm_chip {
struct dentry **bios_dir;
#ifdef CONFIG_ACPI
+ const struct attribute_group *groups[2];
+ unsigned int groups_cnt;
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
#endif /* CONFIG_ACPI */
@@ -182,7 +199,7 @@ struct tpm_chip {
struct list_head list;
};
-#define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
+#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
static inline void tpm_chip_put(struct tpm_chip *chip)
{
@@ -382,6 +399,101 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
+/* A string buffer type for constructing TPM commands. This is based on the
+ * ideas of string buffer code in security/keys/trusted.h but is heap based
+ * in order to keep the stack usage minimal.
+ */
+
+enum tpm_buf_flags {
+ TPM_BUF_OVERFLOW = BIT(0),
+};
+
+struct tpm_buf {
+ struct page *data_page;
+ unsigned int flags;
+ u8 *data;
+};
+
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ struct tpm_input_header *head;
+
+ buf->data_page = alloc_page(GFP_HIGHUSER);
+ if (!buf->data_page)
+ return -ENOMEM;
+
+ buf->flags = 0;
+ buf->data = kmap(buf->data_page);
+
+ head = (struct tpm_input_header *) buf->data;
+
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+
+ return 0;
+}
+
+static inline void tpm_buf_destroy(struct tpm_buf *buf)
+{
+ kunmap(buf->data_page);
+ __free_page(buf->data_page);
+}
+
+static inline u32 tpm_buf_length(struct tpm_buf *buf)
+{
+ struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+
+ return be32_to_cpu(head->length);
+}
+
+static inline u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+ struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+
+ return be16_to_cpu(head->tag);
+}
+
+static inline void tpm_buf_append(struct tpm_buf *buf,
+ const unsigned char *new_data,
+ unsigned int new_len)
+{
+ struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+ u32 len = tpm_buf_length(buf);
+
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_OVERFLOW)
+ return;
+
+ if ((len + new_len) > PAGE_SIZE) {
+ WARN(1, "tpm_buf: overflow\n");
+ buf->flags |= TPM_BUF_OVERFLOW;
+ return;
+ }
+
+ memcpy(&buf->data[len], new_data, new_len);
+ head->length = cpu_to_be32(len + new_len);
+}
+
+static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+ tpm_buf_append(buf, &value, 1);
+}
+
+static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+ __be16 value2 = cpu_to_be16(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 2);
+}
+
+static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+ __be32 value2 = cpu_to_be32(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 4);
+}
+
extern struct class *tpm_class;
extern dev_t tpm_devt;
extern const struct file_operations tpm_fops;
@@ -412,15 +524,9 @@ void tpm_sysfs_del_device(struct tpm_chip *chip);
int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
#ifdef CONFIG_ACPI
-extern int tpm_add_ppi(struct tpm_chip *chip);
-extern void tpm_remove_ppi(struct tpm_chip *chip);
+extern void tpm_add_ppi(struct tpm_chip *chip);
#else
-static inline int tpm_add_ppi(struct tpm_chip *chip)
-{
- return 0;
-}
-
-static inline void tpm_remove_ppi(struct tpm_chip *chip)
+static inline void tpm_add_ppi(struct tpm_chip *chip)
{
}
#endif
@@ -428,6 +534,12 @@ static inline void tpm_remove_ppi(struct tpm_chip *chip)
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
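The tpm_buf helpers added above follow an init/append/transmit/destroy pattern; a small sketch of that pattern, using TPM2_CC_GET_RANDOM purely as an illustrative command (this request is not part of the diff):

#include "tpm.h"

/* Illustrative use of the tpm_buf helpers: build and send a bare
 * TPM2_CC_GET_RANDOM request asking for 8 bytes. */
static int example_tpm_buf_getrandom(struct tpm_chip *chip)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, 8);	/* bytesRequested */

	if (buf.flags & TPM_BUF_OVERFLOW) {
		rc = -E2BIG;
		goto out;
	}

	rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "example get random");
out:
	tpm_buf_destroy(&buf);
	return rc;
}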
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 011909a9be96..bd7039fafa8a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Intel Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
*
* Authors:
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
@@ -16,6 +16,11 @@
*/
#include "tpm.h"
+#include <keys/trusted-type.h>
+
+enum tpm2_object_attributes {
+ TPM2_ATTR_USER_WITH_AUTH = BIT(6),
+};
struct tpm2_startup_in {
__be16 startup_type;
@@ -381,6 +386,249 @@ static const struct tpm_input_header tpm2_get_tpm_pt_header = {
};
/**
+ * Append TPMS_AUTH_COMMAND to the buffer. The buffer must be initialized
+ * with tpm_buf_init().
+ *
+ * @param buf: an allocated tpm_buf instance
+ * @param session_handle: the session handle, e.g. TPM2_RS_PW for a password
+ * @param nonce: the session nonce, may be NULL if not used
+ * @param nonce_len: the session nonce length, may be 0 if not used
+ * @param attributes: the session attributes
+ * @param hmac: the session HMAC or password, may be NULL if not used
+ * @param hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+ const u8 *nonce, u16 nonce_len,
+ u8 attributes,
+ const u8 *hmac, u16 hmac_len)
+{
+ tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+ tpm_buf_append_u32(buf, session_handle);
+ tpm_buf_append_u16(buf, nonce_len);
+
+ if (nonce && nonce_len)
+ tpm_buf_append(buf, nonce, nonce_len);
+
+ tpm_buf_append_u8(buf, attributes);
+ tpm_buf_append_u16(buf, hmac_len);
+
+ if (hmac && hmac_len)
+ tpm_buf_append(buf, hmac, hmac_len);
+}
+
+/**
+ * tpm2_seal_trusted() - seal a trusted key
+ * @chip: TPM chip to use
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Returns < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ unsigned int blob_len;
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ /* sensitive */
+ tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len);
+
+ tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+ tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+ tpm_buf_append_u16(&buf, payload->key_len);
+ tpm_buf_append(&buf, payload->key, payload->key_len);
+
+ /* public */
+ tpm_buf_append_u16(&buf, 14);
+
+ tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, TPM2_ALG_SHA256);
+ tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
+ tpm_buf_append_u16(&buf, 0); /* policy digest size */
+ tpm_buf_append_u16(&buf, TPM2_ALG_NULL);
+ tpm_buf_append_u16(&buf, 0);
+
+ /* outside info */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR */
+ tpm_buf_append_u32(&buf, 0);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "sealing data");
+ if (rc)
+ goto out;
+
+ blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ if (blob_len > MAX_BLOB_SIZE) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
+ payload->blob_len = blob_len;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+static int tpm2_load(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle)
+{
+ struct tpm_buf buf;
+ unsigned int private_len;
+ unsigned int public_len;
+ unsigned int blob_len;
+ int rc;
+
+ private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
+ if (private_len > (payload->blob_len - 2))
+ return -E2BIG;
+
+ public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+ blob_len = private_len + public_len + 4;
+ if (blob_len > payload->blob_len)
+ return -E2BIG;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ tpm_buf_append(&buf, payload->blob, blob_len);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "loading blob");
+ if (!rc)
+ *blob_handle = be32_to_cpup(
+ (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+static void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
+ if (rc) {
+ dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n",
+ handle);
+ return;
+ }
+
+ tpm_buf_append_u32(&buf, handle);
+
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context");
+ if (rc)
+ dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle,
+ rc);
+
+ tpm_buf_destroy(&buf);
+}
+
+static int tpm2_unseal(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, blob_handle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->blobauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "unsealing");
+ if (rc > 0)
+ rc = -EPERM;
+
+ if (!rc) {
+ payload->key_len = be16_to_cpup(
+ (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+
+ memcpy(payload->key, &buf.data[TPM_HEADER_SIZE + 6],
+ payload->key_len);
+ }
+
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal a trusted key
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Returns < 0 on error and 0 on success.
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ u32 blob_handle;
+ int rc;
+
+ rc = tpm2_load(chip, payload, options, &blob_handle);
+ if (rc)
+ return rc;
+
+ rc = tpm2_unseal(chip, payload, options, blob_handle);
+
+ tpm2_flush_context(chip, blob_handle);
+
+ return rc;
+}
+
+/**
* tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
* @chip: TPM chip to use.
* @property_id: property ID.
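A minimal usage sketch for the two helpers added above; the example_seal_unseal() wrapper is hypothetical and assumes the chip, payload and options structures are already populated the way a trusted-keys caller would populate them:

static int example_seal_unseal(struct tpm_chip *chip,
                               struct trusted_key_payload *p,
                               struct trusted_key_options *o)
{
        int rc;

        /* fills p->blob and p->blob_len from p->key and p->key_len */
        rc = tpm2_seal_trusted(chip, p, o);
        if (rc)
                return rc;

        /* loads the sealed blob, unseals it and refills p->key / p->key_len */
        return tpm2_unseal_trusted(chip, p, o);
}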
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 1267322595da..4bb9727c1047 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -34,12 +34,6 @@ enum crb_defaults {
CRB_ACPI_START_INDEX = 1,
};
-enum crb_start_method {
- CRB_SM_ACPI_START = 2,
- CRB_SM_CRB = 7,
- CRB_SM_CRB_WITH_ACPI_START = 8,
-};
-
struct acpi_tpm2 {
struct acpi_table_header hdr;
u16 platform_class;
@@ -74,7 +68,8 @@ struct crb_control_area {
u32 int_enable;
u32 int_sts;
u32 cmd_size;
- u64 cmd_pa;
+ u32 cmd_pa_low;
+ u32 cmd_pa_high;
u32 rsp_size;
u64 rsp_pa;
} __packed;
@@ -220,12 +215,6 @@ static int crb_acpi_add(struct acpi_device *device)
u64 pa;
int rc;
- chip = tpmm_chip_alloc(dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
status = acpi_get_table(ACPI_SIG_TPM2, 1,
(struct acpi_table_header **) &buf);
if (ACPI_FAILURE(status)) {
@@ -233,13 +222,15 @@ static int crb_acpi_add(struct acpi_device *device)
return -ENODEV;
}
- /* At least some versions of AMI BIOS have a bug that TPM2 table has
- * zero address for the control area and therefore we must fail.
- */
- if (!buf->control_area_pa) {
- dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
- return -EINVAL;
- }
+ /* Should the FIFO driver handle this? */
+ if (buf->start_method == TPM2_START_FIFO)
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ chip->flags = TPM_CHIP_FLAG_TPM2;
if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
dev_err(dev, "TPM2 ACPI table has wrong size");
@@ -259,11 +250,11 @@ static int crb_acpi_add(struct acpi_device *device)
* report only ACPI start but in practice seems to require both
* ACPI start and CRB start.
*/
- if (sm == CRB_SM_CRB || sm == CRB_SM_CRB_WITH_ACPI_START ||
+ if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
!strcmp(acpi_device_hid(device), "MSFT0101"))
priv->flags |= CRB_FL_CRB_START;
- if (sm == CRB_SM_ACPI_START || sm == CRB_SM_CRB_WITH_ACPI_START)
+ if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
priv->flags |= CRB_FL_ACPI_START;
priv->cca = (struct crb_control_area __iomem *)
@@ -273,8 +264,8 @@ static int crb_acpi_add(struct acpi_device *device)
return -ENOMEM;
}
- memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
- pa = le64_to_cpu(pa);
+ pa = ((u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_high)) << 32) |
+ (u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_low));
priv->cmd = devm_ioremap_nocache(dev, pa,
ioread32(&priv->cca->cmd_size));
if (!priv->cmd) {
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index 3a56a131586c..bd72fb04225e 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -76,15 +76,25 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
void *addr = log->bios_event_log;
void *limit = log->bios_event_log_end;
struct tcpa_event *event;
+ u32 converted_event_size;
+ u32 converted_event_type;
+
/* read over *pos measurements */
for (i = 0; i < *pos; i++) {
event = addr;
+ converted_event_size =
+ do_endian_conversion(event->event_size);
+ converted_event_type =
+ do_endian_conversion(event->event_type);
+
if ((addr + sizeof(struct tcpa_event)) < limit) {
- if (event->event_type == 0 && event->event_size == 0)
+ if ((converted_event_type == 0) &&
+ (converted_event_size == 0))
return NULL;
- addr += sizeof(struct tcpa_event) + event->event_size;
+ addr += (sizeof(struct tcpa_event) +
+ converted_event_size);
}
}
@@ -94,8 +104,12 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
event = addr;
- if ((event->event_type == 0 && event->event_size == 0) ||
- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
+ converted_event_size = do_endian_conversion(event->event_size);
+ converted_event_type = do_endian_conversion(event->event_type);
+
+ if (((converted_event_type == 0) && (converted_event_size == 0))
+ || ((addr + sizeof(struct tcpa_event) + converted_event_size)
+ >= limit))
return NULL;
return addr;
@@ -107,8 +121,12 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
struct tcpa_event *event = v;
struct tpm_bios_log *log = m->private;
void *limit = log->bios_event_log_end;
+ u32 converted_event_size;
+ u32 converted_event_type;
- v += sizeof(struct tcpa_event) + event->event_size;
+ converted_event_size = do_endian_conversion(event->event_size);
+
+ v += sizeof(struct tcpa_event) + converted_event_size;
/* now check if current entry is valid */
if ((v + sizeof(struct tcpa_event)) >= limit)
@@ -116,11 +134,11 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
event = v;
- if (event->event_type == 0 && event->event_size == 0)
- return NULL;
+ converted_event_size = do_endian_conversion(event->event_size);
+ converted_event_type = do_endian_conversion(event->event_type);
- if ((event->event_type == 0 && event->event_size == 0) ||
- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
+ if (((converted_event_type == 0) && (converted_event_size == 0)) ||
+ ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit))
return NULL;
(*pos)++;
@@ -140,7 +158,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
int i, n_len = 0, d_len = 0;
struct tcpa_pc_event *pc_event;
- switch(event->event_type) {
+ switch (do_endian_conversion(event->event_type)) {
case PREBOOT:
case POST_CODE:
case UNUSED:
@@ -156,14 +174,16 @@ static int get_event_name(char *dest, struct tcpa_event *event,
case NONHOST_CODE:
case NONHOST_CONFIG:
case NONHOST_INFO:
- name = tcpa_event_type_strings[event->event_type];
+ name = tcpa_event_type_strings[do_endian_conversion
+ (event->event_type)];
n_len = strlen(name);
break;
case SEPARATOR:
case ACTION:
- if (MAX_TEXT_EVENT > event->event_size) {
+ if (MAX_TEXT_EVENT >
+ do_endian_conversion(event->event_size)) {
name = event_entry;
- n_len = event->event_size;
+ n_len = do_endian_conversion(event->event_size);
}
break;
case EVENT_TAG:
@@ -171,7 +191,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
/* ToDo Row data -> Base64 */
- switch (pc_event->event_id) {
+ switch (do_endian_conversion(pc_event->event_id)) {
case SMBIOS:
case BIS_CERT:
case CMOS:
@@ -179,7 +199,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
case OPTION_ROM_EXEC:
case OPTION_ROM_CONFIG:
case S_CRTM_VERSION:
- name = tcpa_pc_event_id_strings[pc_event->event_id];
+ name = tcpa_pc_event_id_strings[do_endian_conversion
+ (pc_event->event_id)];
n_len = strlen(name);
break;
/* hash data */
@@ -188,7 +209,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
case OPTION_ROM_MICROCODE:
case S_CRTM_CONTENTS:
case POST_CONTENTS:
- name = tcpa_pc_event_id_strings[pc_event->event_id];
+ name = tcpa_pc_event_id_strings[do_endian_conversion
+ (pc_event->event_id)];
n_len = strlen(name);
for (i = 0; i < 20; i++)
d_len += sprintf(&data[2*i], "%02x",
@@ -209,13 +231,24 @@ static int get_event_name(char *dest, struct tcpa_event *event,
static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
- char *data = v;
+ struct tcpa_event temp_event;
+ char *tempPtr;
int i;
- for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
- seq_putc(m, data[i]);
+ memcpy(&temp_event, event, sizeof(struct tcpa_event));
+
+ /* convert raw integers for endianness */
+ temp_event.pcr_index = do_endian_conversion(event->pcr_index);
+ temp_event.event_type = do_endian_conversion(event->event_type);
+ temp_event.event_size = do_endian_conversion(event->event_size);
+
+ tempPtr = (char *)&temp_event;
+
+ for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
+ seq_putc(m, tempPtr[i]);
return 0;
}
static int tpm_bios_measurements_release(struct inode *inode,
@@ -238,7 +271,7 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
char *eventname;
struct tcpa_event *event = v;
unsigned char *event_entry =
- (unsigned char *) (v + sizeof(struct tcpa_event));
+ (unsigned char *)(v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
if (!eventname) {
@@ -247,13 +280,14 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return -EFAULT;
}
- seq_printf(m, "%2d ", event->pcr_index);
+ /* 1st: PCR */
+ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
/* 2nd: SHA1 */
seq_printf(m, "%20phN", event->pcr_value);
/* 3rd: event type identifier */
- seq_printf(m, " %02x", event->event_type);
+ seq_printf(m, " %02x", do_endian_conversion(event->event_type));
len += get_event_name(eventname, event, event_entry);
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index e7da086d6928..267bfbd1b7bb 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -6,6 +6,12 @@
#define MAX_TEXT_EVENT 1000 /* Max event string length */
#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
+#ifdef CONFIG_PPC64
+#define do_endian_conversion(x) be32_to_cpu(x)
+#else
+#define do_endian_conversion(x) x
+#endif
+
enum bios_platform_class {
BIOS_CLIENT = 0x00,
BIOS_SERVER = 0x01,
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 7a0ca78ad3c6..8dfb88b9739c 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -217,7 +217,6 @@ static struct i2c_driver i2c_atmel_driver = {
.remove = i2c_atmel_remove,
.driver = {
.name = I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &i2c_atmel_pm_ops,
.of_match_table = of_match_ptr(i2c_atmel_of_match),
},
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 33c5f360ab01..63d5d22e9e60 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -711,7 +711,6 @@ static struct i2c_driver tpm_tis_i2c_driver = {
.remove = tpm_tis_i2c_remove,
.driver = {
.name = "tpm_i2c_infineon",
- .owner = THIS_MODULE,
.pm = &tpm_tis_i2c_ops,
.of_match_table = of_match_ptr(tpm_tis_i2c_of_match),
},
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 9d42b7d78e50..847f1597fe9b 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -641,7 +641,6 @@ static struct i2c_driver i2c_nuvoton_driver = {
.remove = i2c_nuvoton_remove,
.driver = {
.name = I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &i2c_nuvoton_pm_ops,
.of_match_table = of_match_ptr(i2c_nuvoton_of_match),
},
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 27ebf9511cb4..3e6a22658b63 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -491,7 +491,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
}
ibmvtpm->rtce_size = be16_to_cpu(crq->len);
ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
return;
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index eebe6256918f..1141456a4b1f 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -24,14 +24,14 @@ int read_log(struct tpm_bios_log *log)
{
struct device_node *np;
const u32 *sizep;
- const __be64 *basep;
+ const u64 *basep;
if (log->bios_event_log != NULL) {
pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
return -EFAULT;
}
- np = of_find_node_by_name(NULL, "ibm,vtpm");
+ np = of_find_node_by_name(NULL, "vtpm");
if (!np) {
pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
return -ENODEV;
@@ -63,7 +63,7 @@ int read_log(struct tpm_bios_log *log)
log->bios_event_log_end = log->bios_event_log + *sizep;
- memcpy(log->bios_event_log, __va(be64_to_cpup(basep)), *sizep);
+ memcpy(log->bios_event_log, __va(*basep), *sizep);
return 0;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 6ca9b5d78144..692a2c6ae036 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -53,7 +53,7 @@ tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
static ssize_t tpm_show_ppi_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
}
@@ -63,7 +63,7 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
{
ssize_t size = -EINVAL;
union acpi_object *obj;
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
ACPI_TYPE_PACKAGE, NULL);
@@ -100,7 +100,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
int func = TPM_PPI_FN_SUBREQ;
union acpi_object *obj, tmp;
union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
/*
* the function to submit TPM operation request to pre-os environment
@@ -156,7 +156,7 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
.buffer.length = 0,
.buffer.pointer = NULL
};
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
static char *info[] = {
"None",
@@ -197,7 +197,7 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
acpi_status status = -EINVAL;
union acpi_object *obj, *ret_obj;
u64 req, res;
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP,
ACPI_TYPE_PACKAGE, NULL);
@@ -296,7 +296,7 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
PPI_TPM_REQ_MAX);
@@ -306,7 +306,7 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
PPI_VS_REQ_END);
@@ -334,17 +334,16 @@ static struct attribute_group ppi_attr_grp = {
.attrs = ppi_attrs
};
-int tpm_add_ppi(struct tpm_chip *chip)
+void tpm_add_ppi(struct tpm_chip *chip)
{
union acpi_object *obj;
- int rc;
if (!chip->acpi_dev_handle)
- return 0;
+ return;
if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
- return 0;
+ return;
/* Cache PPI version string. */
obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid,
@@ -356,16 +355,5 @@ int tpm_add_ppi(struct tpm_chip *chip)
ACPI_FREE(obj);
}
- rc = sysfs_create_group(&chip->pdev->kobj, &ppi_attr_grp);
-
- if (!rc)
- chip->flags |= TPM_CHIP_FLAG_PPI;
-
- return rc;
-}
-
-void tpm_remove_ppi(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_PPI)
- sysfs_remove_group(&chip->pdev->kobj, &ppi_attr_grp);
+ chip->groups[chip->groups_cnt++] = &ppi_attr_grp;
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index f2dffa770b8e..696ef1d56b4f 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2005, 2006 IBM Corporation
- * Copyright (C) 2014 Intel Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
@@ -28,6 +28,7 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <acpi/actbl2.h>
#include "tpm.h"
enum tis_access {
@@ -65,6 +66,17 @@ enum tis_defaults {
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
};
+struct tpm_info {
+ unsigned long start;
+ unsigned long len;
+ unsigned int irq;
+};
+
+static struct tpm_info tis_default_info = {
+ .start = TIS_MEM_BASE,
+ .len = TIS_MEM_LEN,
+ .irq = 0,
+};
/* Some timeout values are needed before it is known whether the chip is
* TPM 1.0 or TPM 2.0.
@@ -91,26 +103,54 @@ struct priv_data {
};
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
-static int is_itpm(struct pnp_dev *dev)
+static int has_hid(struct acpi_device *dev, const char *hid)
{
- struct acpi_device *acpi = pnp_acpi_device(dev);
struct acpi_hardware_id *id;
- if (!acpi)
- return 0;
-
- list_for_each_entry(id, &acpi->pnp.ids, list) {
- if (!strcmp("INTC0102", id->id))
+ list_for_each_entry(id, &dev->pnp.ids, list)
+ if (!strcmp(hid, id->id))
return 1;
- }
return 0;
}
+
+static inline int is_itpm(struct acpi_device *dev)
+{
+ return has_hid(dev, "INTC0102");
+}
+
+static inline int is_fifo(struct acpi_device *dev)
+{
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
+
+ /* TPM 1.2 FIFO */
+ if (!has_hid(dev, "MSFT0101"))
+ return 1;
+
+ st = acpi_get_table(ACPI_SIG_TPM2, 1,
+ (struct acpi_table_header **) &tbl);
+ if (ACPI_FAILURE(st)) {
+ dev_err(&dev->dev, "failed to get TPM2 ACPI table\n");
+ return 0;
+ }
+
+ if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO)
+ return 0;
+
+ /* TPM 2.0 FIFO */
+ return 1;
+}
#else
-static inline int is_itpm(struct pnp_dev *dev)
+static inline int is_itpm(struct acpi_device *dev)
{
return 0;
}
+
+static inline int is_fifo(struct acpi_device *dev)
+{
+ return 1;
+}
#endif
/* Before we attempt to access the TPM we must see that the valid bit is set.
@@ -600,9 +640,8 @@ static void tpm_tis_remove(struct tpm_chip *chip)
release_locality(chip, chip->vendor.locality, 1);
}
-static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
- resource_size_t start, resource_size_t len,
- unsigned int irq)
+static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
+ acpi_handle acpi_dev_handle)
{
u32 vendor, intfcaps, intmask;
int rc, i, irq_s, irq_e, probe;
@@ -622,7 +661,7 @@ static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
chip->acpi_dev_handle = acpi_dev_handle;
#endif
- chip->vendor.iobase = devm_ioremap(dev, start, len);
+ chip->vendor.iobase = devm_ioremap(dev, tpm_info->start, tpm_info->len);
if (!chip->vendor.iobase)
return -EIO;
@@ -707,7 +746,7 @@ static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
if (interrupts)
- chip->vendor.irq = irq;
+ chip->vendor.irq = tpm_info->irq;
if (interrupts && !chip->vendor.irq) {
irq_s =
ioread8(chip->vendor.iobase +
@@ -890,27 +929,27 @@ static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
{
- resource_size_t start, len;
- unsigned int irq = 0;
+ struct tpm_info tpm_info = tis_default_info;
acpi_handle acpi_dev_handle = NULL;
- start = pnp_mem_start(pnp_dev, 0);
- len = pnp_mem_len(pnp_dev, 0);
+ tpm_info.start = pnp_mem_start(pnp_dev, 0);
+ tpm_info.len = pnp_mem_len(pnp_dev, 0);
if (pnp_irq_valid(pnp_dev, 0))
- irq = pnp_irq(pnp_dev, 0);
+ tpm_info.irq = pnp_irq(pnp_dev, 0);
else
interrupts = false;
- if (is_itpm(pnp_dev))
- itpm = true;
-
#ifdef CONFIG_ACPI
- if (pnp_acpi_device(pnp_dev))
+ if (pnp_acpi_device(pnp_dev)) {
+ if (is_itpm(pnp_acpi_device(pnp_dev)))
+ itpm = true;
+
acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
+ }
#endif
- return tpm_tis_init(&pnp_dev->dev, acpi_dev_handle, start, len, irq);
+ return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
}
static struct pnp_device_id tpm_pnp_tbl[] = {
@@ -930,6 +969,7 @@ MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
+
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
}
@@ -950,6 +990,79 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
+#ifdef CONFIG_ACPI
+static int tpm_check_resource(struct acpi_resource *ares, void *data)
+{
+ struct tpm_info *tpm_info = (struct tpm_info *) data;
+ struct resource res;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &res)) {
+ tpm_info->irq = res.start;
+ } else if (acpi_dev_resource_memory(ares, &res)) {
+ tpm_info->start = res.start;
+ tpm_info->len = resource_size(&res);
+ }
+
+ return 1;
+}
+
+static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
+{
+ struct list_head resources;
+ struct tpm_info tpm_info = tis_default_info;
+ int ret;
+
+ if (!is_fifo(acpi_dev))
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&resources);
+ ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
+ &tpm_info);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resources);
+
+ if (!tpm_info.irq)
+ interrupts = false;
+
+ if (is_itpm(acpi_dev))
+ itpm = true;
+
+ return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle);
+}
+
+static int tpm_tis_acpi_remove(struct acpi_device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+
+ return 0;
+}
+
+static struct acpi_device_id tpm_acpi_tbl[] = {
+ {"MSFT0101", 0}, /* TPM 2.0 */
+ /* Add new here */
+ {"", 0}, /* User Specified */
+ {"", 0} /* Terminator */
+};
+MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
+
+static struct acpi_driver tis_acpi_driver = {
+ .name = "tpm_tis",
+ .ids = tpm_acpi_tbl,
+ .ops = {
+ .add = tpm_tis_acpi_init,
+ .remove = tpm_tis_acpi_remove,
+ },
+ .drv = {
+ .pm = &tpm_tis_pm,
+ },
+};
+#endif
+
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
@@ -966,9 +1079,25 @@ static int __init init_tis(void)
{
int rc;
#ifdef CONFIG_PNP
- if (!force)
- return pnp_register_driver(&tis_pnp_driver);
+ if (!force) {
+ rc = pnp_register_driver(&tis_pnp_driver);
+ if (rc)
+ return rc;
+ }
+#endif
+#ifdef CONFIG_ACPI
+ if (!force) {
+ rc = acpi_bus_register_driver(&tis_acpi_driver);
+ if (rc) {
+#ifdef CONFIG_PNP
+ pnp_unregister_driver(&tis_pnp_driver);
#endif
+ return rc;
+ }
+ }
+#endif
+ if (!force)
+ return 0;
rc = platform_driver_register(&tis_drv);
if (rc < 0)
@@ -978,7 +1107,7 @@ static int __init init_tis(void)
rc = PTR_ERR(pdev);
goto err_dev;
}
- rc = tpm_tis_init(&pdev->dev, NULL, TIS_MEM_BASE, TIS_MEM_LEN, 0);
+ rc = tpm_tis_init(&pdev->dev, &tis_default_info, NULL);
if (rc)
goto err_init;
return 0;
@@ -992,9 +1121,14 @@ err_dev:
static void __exit cleanup_tis(void)
{
struct tpm_chip *chip;
-#ifdef CONFIG_PNP
+#if defined(CONFIG_PNP) || defined(CONFIG_ACPI)
if (!force) {
+#ifdef CONFIG_ACPI
+ acpi_bus_unregister_driver(&tis_acpi_driver);
+#endif
+#ifdef CONFIG_PNP
pnp_unregister_driver(&tis_pnp_driver);
+#endif
return;
}
#endif
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 42f7120ca9ce..57316528e924 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -14,6 +14,7 @@ config COMMON_CLK
select HAVE_CLK_PREPARE
select CLKDEV_LOOKUP
select SRCU
+ select RATIONAL
---help---
The common clock framework is a single definition of struct
clk, useful across many platforms, as well as an
@@ -68,6 +69,16 @@ config COMMON_CLK_SI5351
This driver supports Silicon Labs 5351A/B/C programmable clock
generators.
+config COMMON_CLK_SI514
+ tristate "Clock driver for SiLabs 514 devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ ---help---
+ This driver supports the Silicon Labs 514 programmable clock
+ generator.
+
config COMMON_CLK_SI570
tristate "Clock driver for SiLabs 570 and compatible devices"
depends on I2C
@@ -113,7 +124,7 @@ config CLK_TWL6040
config COMMON_CLK_AXI_CLKGEN
tristate "AXI clkgen driver"
- depends on ARCH_ZYNQ || MICROBLAZE
+ depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
help
Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
@@ -121,7 +132,7 @@ config COMMON_CLK_AXI_CLKGEN
config CLK_QORIQ
bool "Clock driver for Freescale QorIQ platforms"
- depends on (PPC_E500MC || ARM) && OF
+ depends on (PPC_E500MC || ARM || COMPILE_TEST) && OF
---help---
This adds the clock driver support for Freescale QorIQ platforms
using common clock framework.
@@ -129,13 +140,13 @@ config CLK_QORIQ
config COMMON_CLK_XGENE
bool "Clock driver for APM XGene SoC"
default y
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
---help---
Support for the APM X-Gene SoC reference, PLL, and device clocks.
config COMMON_CLK_KEYSTONE
tristate "Clock drivers for Keystone based SOCs"
- depends on ARCH_KEYSTONE && OF
+ depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
---help---
Supports clock drivers for Keystone based SOCs. These SOCs have a local
power sleep control module that gates the clocks to the IPs and PLLs.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d08b3e5985be..d3e1910eebab 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
obj-$(CONFIG_COMMON_CLK) += clk-gate.o
+obj-$(CONFIG_COMMON_CLK) += clk-multiplier.o
obj-$(CONFIG_COMMON_CLK) += clk-mux.o
obj-$(CONFIG_COMMON_CLK) += clk-composite.o
obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o
@@ -19,7 +20,6 @@ endif
obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
-obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
@@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
@@ -47,7 +48,7 @@ obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
-obj-$(CONFIG_ARCH_BCM) += bcm/
+obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_MXC) += imx/
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 89a48a7bd5df..13e67bd35cff 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
+obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
new file mode 100644
index 000000000000..abc80949e1dd
--- /dev/null
+++ b/drivers/clk/at91/clk-generated.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2015 Atmel Corporation,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define PERIPHERAL_MAX 64
+#define PERIPHERAL_ID_MIN 2
+
+#define GENERATED_SOURCE_MAX 6
+#define GENERATED_MAX_DIV 255
+
+struct clk_generated {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ struct clk_range range;
+ u32 id;
+ u32 gckdiv;
+ u8 parent_id;
+};
+
+#define to_clk_generated(hw) \
+ container_of(hw, struct clk_generated, hw)
+
+static int clk_generated_enable(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+ struct at91_pmc *pmc = gck->pmc;
+ u32 tmp;
+
+ pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
+ __func__, gck->gckdiv, gck->parent_id);
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
+ tmp = pmc_read(pmc, AT91_PMC_PCR) &
+ ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK);
+ pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id)
+ | AT91_PMC_PCR_CMD
+ | AT91_PMC_PCR_GCKDIV(gck->gckdiv)
+ | AT91_PMC_PCR_GCKEN);
+ pmc_unlock(pmc);
+ return 0;
+}
+
+static void clk_generated_disable(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+ struct at91_pmc *pmc = gck->pmc;
+ u32 tmp;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
+ tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN;
+ pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
+ pmc_unlock(pmc);
+}
+
+static int clk_generated_is_enabled(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+ struct at91_pmc *pmc = gck->pmc;
+ int ret;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
+ ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN);
+ pmc_unlock(pmc);
+
+ return ret;
+}
+
+static unsigned long
+clk_generated_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
+}
+
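+/*
+ * For each available parent, try every divisor from 1 to
+ * GENERATED_MAX_DIV + 1 and remember the combination whose output rate
+ * is closest to the requested rate; a parent's scan stops once an exact
+ * match is found or the candidate rate drops below the request.
+ */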
+static int clk_generated_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+ struct clk_hw *parent = NULL;
+ long best_rate = -EINVAL;
+ unsigned long tmp_rate, min_rate;
+ int best_diff = -1;
+ int tmp_diff;
+ int i;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ u32 div;
+ unsigned long parent_rate;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
+ if (!parent_rate ||
+ (gck->range.max && min_rate > gck->range.max))
+ continue;
+
+ for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
+ tmp_rate = DIV_ROUND_CLOSEST(parent_rate, div);
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (!best_diff || tmp_rate < req->rate)
+ break;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+ return 0;
+}
+
+/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
+static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ gck->parent_id = index;
+ return 0;
+}
+
+static u8 clk_generated_get_parent(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ return gck->parent_id;
+}
+
+/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
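+/*
+ * Worked example (illustrative numbers): for a 100 MHz parent and a
+ * 12 MHz request, DIV_ROUND_CLOSEST(100000000, 12000000) = 8, so gckdiv
+ * is stored as 7 and clk_generated_recalc_rate() reports 12.5 MHz.
+ */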
+static int clk_generated_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+ u32 div;
+
+ if (!rate)
+ return -EINVAL;
+
+ if (gck->range.max && rate > gck->range.max)
+ return -EINVAL;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > GENERATED_MAX_DIV + 1 || !div)
+ return -EINVAL;
+
+ gck->gckdiv = div - 1;
+ return 0;
+}
+
+static const struct clk_ops generated_ops = {
+ .enable = clk_generated_enable,
+ .disable = clk_generated_disable,
+ .is_enabled = clk_generated_is_enabled,
+ .recalc_rate = clk_generated_recalc_rate,
+ .determine_rate = clk_generated_determine_rate,
+ .get_parent = clk_generated_get_parent,
+ .set_parent = clk_generated_set_parent,
+ .set_rate = clk_generated_set_rate,
+};
+
+/**
+ * clk_generated_startup - Initialize a given clock to its default parent and
+ * divisor parameter.
+ *
+ * @gck: Generated clock to set the startup parameters for.
+ *
+ * Take parameters from the hardware and update local clock configuration
+ * accordingly.
+ */
+static void clk_generated_startup(struct clk_generated *gck)
+{
+ struct at91_pmc *pmc = gck->pmc;
+ u32 tmp;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
+ tmp = pmc_read(pmc, AT91_PMC_PCR);
+ pmc_unlock(pmc);
+
+ gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
+ >> AT91_PMC_PCR_GCKCSS_OFFSET;
+ gck->gckdiv = (tmp & AT91_PMC_PCR_GCKDIV_MASK)
+ >> AT91_PMC_PCR_GCKDIV_OFFSET;
+}
+
+static struct clk * __init
+at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents,
+ u8 id, const struct clk_range *range)
+{
+ struct clk_generated *gck;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ gck = kzalloc(sizeof(*gck), GFP_KERNEL);
+ if (!gck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &generated_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ gck->id = id;
+ gck->hw.init = &init;
+ gck->pmc = pmc;
+ gck->range = *range;
+
+ clk = clk_register(NULL, &gck->hw);
+ if (IS_ERR(clk))
+ kfree(gck);
+ else
+ clk_generated_startup(gck);
+
+ return clk;
+}
+
+void __init of_sama5d2_clk_generated_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ int num;
+ u32 id;
+ const char *name;
+ struct clk *clk;
+ int num_parents;
+ const char *parent_names[GENERATED_SOURCE_MAX];
+ struct device_node *gcknp;
+ struct clk_range range = CLK_RANGE(0, 0);
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ num = of_get_child_count(np);
+ if (!num || num > PERIPHERAL_MAX)
+ return;
+
+ for_each_child_of_node(np, gcknp) {
+ if (of_property_read_u32(gcknp, "reg", &id))
+ continue;
+
+ if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX)
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = gcknp->name;
+
+ of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
+ &range);
+
+ clk = at91_clk_register_generated(pmc, name, parent_names,
+ num_parents, id, &range);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(gcknp, of_clk_src_simple_get, clk);
+ }
+}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index e4d7b574f1ea..58f3b568e9cb 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -161,14 +161,18 @@ static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
struct at91_pmc *pmc = periph->pmc;
+ u32 tmp;
if (periph->id < PERIPHERAL_ID_MIN)
return 0;
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
- AT91_PMC_PCR_CMD |
- AT91_PMC_PCR_DIV(periph->div) |
- AT91_PMC_PCR_EN);
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
+ tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_DIV_MASK;
+ pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_DIV(periph->div)
+ | AT91_PMC_PCR_CMD
+ | AT91_PMC_PCR_EN);
+ pmc_unlock(pmc);
return 0;
}
@@ -176,12 +180,16 @@ static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
struct at91_pmc *pmc = periph->pmc;
+ u32 tmp;
if (periph->id < PERIPHERAL_ID_MIN)
return;
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
- AT91_PMC_PCR_CMD);
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
+ tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_EN;
+ pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
+ pmc_unlock(pmc);
}
static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
@@ -194,7 +202,7 @@ static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
return 1;
pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
pmc_unlock(pmc);
@@ -213,7 +221,7 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
return parent_rate;
pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
tmp = pmc_read(pmc, AT91_PMC_PCR);
pmc_unlock(pmc);
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 58008b3e8bc1..3f5314344286 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -138,7 +138,8 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
clk = clk_register(NULL, &sys->hw);
if (IS_ERR(clk)) {
- free_irq(sys->irq, sys);
+ if (irq)
+ free_irq(sys->irq, sys);
kfree(sys);
}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index 30dd697b1668..ca561e90a60f 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -47,7 +47,7 @@ static int clk_utmi_prepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
struct at91_pmc *pmc = utmi->pmc;
- u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
+ u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
pmc_write(pmc, AT91_CKGR_UCKR, tmp);
@@ -73,7 +73,7 @@ static void clk_utmi_unprepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
struct at91_pmc *pmc = utmi->pmc;
- u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
+ u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
pmc_write(pmc, AT91_CKGR_UCKR, tmp);
}
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index d1844f1f3729..8476b570779b 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -206,6 +206,14 @@ static const struct at91_pmc_caps at91sam9x5_caps = {
AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
};
+static const struct at91_pmc_caps sama5d2_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
+ AT91_PMC_CFDEV | AT91_PMC_GCKRDY,
+};
+
static const struct at91_pmc_caps sama5d3_caps = {
.available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
@@ -369,6 +377,12 @@ static const struct of_device_id pmc_clk_ids[] __initconst = {
.data = of_sama5d4_clk_h32mx_setup,
},
#endif
+#if defined(CONFIG_HAVE_AT91_GENERATED_CLK)
+ {
+ .compatible = "atmel,sama5d2-clk-generated",
+ .data = of_sama5d2_clk_generated_setup,
+ },
+#endif
{ /*sentinel*/ }
};
@@ -436,6 +450,13 @@ static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
of_at91sam9x5_pmc_setup);
+static void __init of_sama5d2_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &sama5d2_caps);
+}
+CLK_OF_DECLARE(sama5d2_clk_pmc, "atmel,sama5d2-pmc",
+ of_sama5d2_pmc_setup);
+
static void __init of_sama5d3_pmc_setup(struct device_node *np)
{
of_at91_pmc_setup(np, &sama5d3_caps);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 8b87771c69b2..f65739272779 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -118,4 +118,7 @@ void of_at91sam9x5_clk_smd_setup(struct device_node *np,
void of_sama5d4_clk_h32mx_setup(struct device_node *np,
struct at91_pmc *pmc);
+void of_sama5d2_clk_generated_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
#endif /* __PMC_H_ */
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 88febf53b276..85260fb96b36 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -1,6 +1,6 @@
config CLK_BCM_KONA
bool "Broadcom Kona CCU clock support"
- depends on ARCH_BCM_MOBILE
+ depends on ARCH_BCM_MOBILE || COMPILE_TEST
depends on COMMON_CLK
default y
help
@@ -9,10 +9,8 @@ config CLK_BCM_KONA
in the BCM281xx and BCM21664 families.
config COMMON_CLK_IPROC
- bool "Broadcom iProc clock support"
- depends on ARCH_BCM_IPROC
+ bool
depends on COMMON_CLK
- default ARCH_BCM_IPROC
help
Enable common clock framework support for Broadcom SoCs
based on the iProc architecture
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index 8a7a477862c7..3fc95060d875 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
@@ -3,4 +3,8 @@ obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o
obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o
+obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
+obj-$(CONFIG_COMMON_CLK_IPROC) += clk-ns2.o
obj-$(CONFIG_ARCH_BCM_CYGNUS) += clk-cygnus.o
+obj-$(CONFIG_ARCH_BCM_NSP) += clk-nsp.o
+obj-$(CONFIG_ARCH_BCM_5301X) += clk-nsp.o
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
new file mode 100644
index 000000000000..39bf5820297e
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -0,0 +1,1575 @@
+/*
+ * Copyright (C) 2010,2015 Broadcom
+ * Copyright (C) 2012 Stephen Warren
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/**
+ * DOC: BCM2835 CPRMAN (clock manager for the "audio" domain)
+ *
+ * The clock tree on the 2835 has several levels. There's a root
+ * oscillator running at 19.2 MHz. After the oscillator there are 5
+ * PLLs, roughly divided as "camera", "ARM", "core", "DSI displays",
+ * and "HDMI displays". Those 5 PLLs each can divide their output to
+ * produce up to 4 channels. Finally, there is the level of clocks to
+ * be consumed by other hardware components (like "H264" or "HDMI
+ * state machine"), which divide off of some subset of the PLL
+ * channels.
+ *
+ * All of the clocks in the tree are exposed in the DT, because the DT
+ * may want to make assignments of the final layer of clocks to the
+ * PLL channels, and some components of the hardware will actually
+ * skip layers of the tree (for example, the pixel clock comes
+ * directly from the PLLH PIX channel without using a CM_*CTL clock
+ * generator).
+ */
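+
+/*
+ * Illustrative paths through that tree, using names defined below: the
+ * pixel clock is fed straight from a PLL channel,
+ *
+ *   osc (19.2 MHz) -> PLLH -> PLLH_PIX channel -> pixel clock
+ *
+ * while most consumer clocks add a CM_*CTL/CM_*DIV generator stage, e.g.
+ *
+ *   osc -> PLLC -> PLLC_CORE0 channel -> CM_VPUCTL/CM_VPUDIV -> VPU clock
+ */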
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/bcm2835.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/bcm2835.h>
+
+#define CM_PASSWORD 0x5a000000
+
+#define CM_GNRICCTL 0x000
+#define CM_GNRICDIV 0x004
+# define CM_DIV_FRAC_BITS 12
+
+#define CM_VPUCTL 0x008
+#define CM_VPUDIV 0x00c
+#define CM_SYSCTL 0x010
+#define CM_SYSDIV 0x014
+#define CM_PERIACTL 0x018
+#define CM_PERIADIV 0x01c
+#define CM_PERIICTL 0x020
+#define CM_PERIIDIV 0x024
+#define CM_H264CTL 0x028
+#define CM_H264DIV 0x02c
+#define CM_ISPCTL 0x030
+#define CM_ISPDIV 0x034
+#define CM_V3DCTL 0x038
+#define CM_V3DDIV 0x03c
+#define CM_CAM0CTL 0x040
+#define CM_CAM0DIV 0x044
+#define CM_CAM1CTL 0x048
+#define CM_CAM1DIV 0x04c
+#define CM_CCP2CTL 0x050
+#define CM_CCP2DIV 0x054
+#define CM_DSI0ECTL 0x058
+#define CM_DSI0EDIV 0x05c
+#define CM_DSI0PCTL 0x060
+#define CM_DSI0PDIV 0x064
+#define CM_DPICTL 0x068
+#define CM_DPIDIV 0x06c
+#define CM_GP0CTL 0x070
+#define CM_GP0DIV 0x074
+#define CM_GP1CTL 0x078
+#define CM_GP1DIV 0x07c
+#define CM_GP2CTL 0x080
+#define CM_GP2DIV 0x084
+#define CM_HSMCTL 0x088
+#define CM_HSMDIV 0x08c
+#define CM_OTPCTL 0x090
+#define CM_OTPDIV 0x094
+#define CM_PWMCTL 0x0a0
+#define CM_PWMDIV 0x0a4
+#define CM_SMICTL 0x0b0
+#define CM_SMIDIV 0x0b4
+#define CM_TSENSCTL 0x0e0
+#define CM_TSENSDIV 0x0e4
+#define CM_TIMERCTL 0x0e8
+#define CM_TIMERDIV 0x0ec
+#define CM_UARTCTL 0x0f0
+#define CM_UARTDIV 0x0f4
+#define CM_VECCTL 0x0f8
+#define CM_VECDIV 0x0fc
+#define CM_PULSECTL 0x190
+#define CM_PULSEDIV 0x194
+#define CM_SDCCTL 0x1a8
+#define CM_SDCDIV 0x1ac
+#define CM_ARMCTL 0x1b0
+#define CM_EMMCCTL 0x1c0
+#define CM_EMMCDIV 0x1c4
+
+/* General bits for the CM_*CTL regs */
+# define CM_ENABLE BIT(4)
+# define CM_KILL BIT(5)
+# define CM_GATE_BIT 6
+# define CM_GATE BIT(CM_GATE_BIT)
+# define CM_BUSY BIT(7)
+# define CM_BUSYD BIT(8)
+# define CM_SRC_SHIFT 0
+# define CM_SRC_BITS 4
+# define CM_SRC_MASK 0xf
+# define CM_SRC_GND 0
+# define CM_SRC_OSC 1
+# define CM_SRC_TESTDEBUG0 2
+# define CM_SRC_TESTDEBUG1 3
+# define CM_SRC_PLLA_CORE 4
+# define CM_SRC_PLLA_PER 4
+# define CM_SRC_PLLC_CORE0 5
+# define CM_SRC_PLLC_PER 5
+# define CM_SRC_PLLD_CORE 6
+# define CM_SRC_PLLD_PER 6
+# define CM_SRC_PLLH_AUX 7
+# define CM_SRC_PLLC_CORE1 8
+# define CM_SRC_PLLC_CORE2 9
+
+#define CM_OSCCOUNT 0x100
+
+#define CM_PLLA 0x104
+# define CM_PLL_ANARST BIT(8)
+# define CM_PLLA_HOLDPER BIT(7)
+# define CM_PLLA_LOADPER BIT(6)
+# define CM_PLLA_HOLDCORE BIT(5)
+# define CM_PLLA_LOADCORE BIT(4)
+# define CM_PLLA_HOLDCCP2 BIT(3)
+# define CM_PLLA_LOADCCP2 BIT(2)
+# define CM_PLLA_HOLDDSI0 BIT(1)
+# define CM_PLLA_LOADDSI0 BIT(0)
+
+#define CM_PLLC 0x108
+# define CM_PLLC_HOLDPER BIT(7)
+# define CM_PLLC_LOADPER BIT(6)
+# define CM_PLLC_HOLDCORE2 BIT(5)
+# define CM_PLLC_LOADCORE2 BIT(4)
+# define CM_PLLC_HOLDCORE1 BIT(3)
+# define CM_PLLC_LOADCORE1 BIT(2)
+# define CM_PLLC_HOLDCORE0 BIT(1)
+# define CM_PLLC_LOADCORE0 BIT(0)
+
+#define CM_PLLD 0x10c
+# define CM_PLLD_HOLDPER BIT(7)
+# define CM_PLLD_LOADPER BIT(6)
+# define CM_PLLD_HOLDCORE BIT(5)
+# define CM_PLLD_LOADCORE BIT(4)
+# define CM_PLLD_HOLDDSI1 BIT(3)
+# define CM_PLLD_LOADDSI1 BIT(2)
+# define CM_PLLD_HOLDDSI0 BIT(1)
+# define CM_PLLD_LOADDSI0 BIT(0)
+
+#define CM_PLLH 0x110
+# define CM_PLLH_LOADRCAL BIT(2)
+# define CM_PLLH_LOADAUX BIT(1)
+# define CM_PLLH_LOADPIX BIT(0)
+
+#define CM_LOCK 0x114
+# define CM_LOCK_FLOCKH BIT(12)
+# define CM_LOCK_FLOCKD BIT(11)
+# define CM_LOCK_FLOCKC BIT(10)
+# define CM_LOCK_FLOCKB BIT(9)
+# define CM_LOCK_FLOCKA BIT(8)
+
+#define CM_EVENT 0x118
+#define CM_DSI1ECTL 0x158
+#define CM_DSI1EDIV 0x15c
+#define CM_DSI1PCTL 0x160
+#define CM_DSI1PDIV 0x164
+#define CM_DFTCTL 0x168
+#define CM_DFTDIV 0x16c
+
+#define CM_PLLB 0x170
+# define CM_PLLB_HOLDARM BIT(1)
+# define CM_PLLB_LOADARM BIT(0)
+
+#define A2W_PLLA_CTRL 0x1100
+#define A2W_PLLC_CTRL 0x1120
+#define A2W_PLLD_CTRL 0x1140
+#define A2W_PLLH_CTRL 0x1160
+#define A2W_PLLB_CTRL 0x11e0
+# define A2W_PLL_CTRL_PRST_DISABLE BIT(17)
+# define A2W_PLL_CTRL_PWRDN BIT(16)
+# define A2W_PLL_CTRL_PDIV_MASK 0x000007000
+# define A2W_PLL_CTRL_PDIV_SHIFT 12
+# define A2W_PLL_CTRL_NDIV_MASK 0x0000003ff
+# define A2W_PLL_CTRL_NDIV_SHIFT 0
+
+#define A2W_PLLA_ANA0 0x1010
+#define A2W_PLLC_ANA0 0x1030
+#define A2W_PLLD_ANA0 0x1050
+#define A2W_PLLH_ANA0 0x1070
+#define A2W_PLLB_ANA0 0x10f0
+
+#define A2W_PLL_KA_SHIFT 7
+#define A2W_PLL_KA_MASK GENMASK(9, 7)
+#define A2W_PLL_KI_SHIFT 19
+#define A2W_PLL_KI_MASK GENMASK(21, 19)
+#define A2W_PLL_KP_SHIFT 15
+#define A2W_PLL_KP_MASK GENMASK(18, 15)
+
+#define A2W_PLLH_KA_SHIFT 19
+#define A2W_PLLH_KA_MASK GENMASK(21, 19)
+#define A2W_PLLH_KI_LOW_SHIFT 22
+#define A2W_PLLH_KI_LOW_MASK GENMASK(23, 22)
+#define A2W_PLLH_KI_HIGH_SHIFT 0
+#define A2W_PLLH_KI_HIGH_MASK GENMASK(0, 0)
+#define A2W_PLLH_KP_SHIFT 1
+#define A2W_PLLH_KP_MASK GENMASK(4, 1)
+
+#define A2W_XOSC_CTRL 0x1190
+# define A2W_XOSC_CTRL_PLLB_ENABLE BIT(7)
+# define A2W_XOSC_CTRL_PLLA_ENABLE BIT(6)
+# define A2W_XOSC_CTRL_PLLD_ENABLE BIT(5)
+# define A2W_XOSC_CTRL_DDR_ENABLE BIT(4)
+# define A2W_XOSC_CTRL_CPR1_ENABLE BIT(3)
+# define A2W_XOSC_CTRL_USB_ENABLE BIT(2)
+# define A2W_XOSC_CTRL_HDMI_ENABLE BIT(1)
+# define A2W_XOSC_CTRL_PLLC_ENABLE BIT(0)
+
+#define A2W_PLLA_FRAC 0x1200
+#define A2W_PLLC_FRAC 0x1220
+#define A2W_PLLD_FRAC 0x1240
+#define A2W_PLLH_FRAC 0x1260
+#define A2W_PLLB_FRAC 0x12e0
+# define A2W_PLL_FRAC_MASK ((1 << A2W_PLL_FRAC_BITS) - 1)
+# define A2W_PLL_FRAC_BITS 20
+
+#define A2W_PLL_CHANNEL_DISABLE BIT(8)
+#define A2W_PLL_DIV_BITS 8
+#define A2W_PLL_DIV_SHIFT 0
+
+#define A2W_PLLA_DSI0 0x1300
+#define A2W_PLLA_CORE 0x1400
+#define A2W_PLLA_PER 0x1500
+#define A2W_PLLA_CCP2 0x1600
+
+#define A2W_PLLC_CORE2 0x1320
+#define A2W_PLLC_CORE1 0x1420
+#define A2W_PLLC_PER 0x1520
+#define A2W_PLLC_CORE0 0x1620
+
+#define A2W_PLLD_DSI0 0x1340
+#define A2W_PLLD_CORE 0x1440
+#define A2W_PLLD_PER 0x1540
+#define A2W_PLLD_DSI1 0x1640
+
+#define A2W_PLLH_AUX 0x1360
+#define A2W_PLLH_RCAL 0x1460
+#define A2W_PLLH_PIX 0x1560
+#define A2W_PLLH_STS 0x1660
+
+#define A2W_PLLH_CTRLR 0x1960
+#define A2W_PLLH_FRACR 0x1a60
+#define A2W_PLLH_AUXR 0x1b60
+#define A2W_PLLH_RCALR 0x1c60
+#define A2W_PLLH_PIXR 0x1d60
+#define A2W_PLLH_STSR 0x1e60
+
+#define A2W_PLLB_ARM 0x13e0
+#define A2W_PLLB_SP0 0x14e0
+#define A2W_PLLB_SP1 0x15e0
+#define A2W_PLLB_SP2 0x16e0
+
+#define LOCK_TIMEOUT_NS 100000000
+#define BCM2835_MAX_FB_RATE 1750000000u
+
+struct bcm2835_cprman {
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t regs_lock;
+ const char *osc_name;
+
+ struct clk_onecell_data onecell;
+ struct clk *clks[BCM2835_CLOCK_COUNT];
+};
+
+static inline void cprman_write(struct bcm2835_cprman *cprman, u32 reg, u32 val)
+{
+ writel(CM_PASSWORD | val, cprman->regs + reg);
+}
+
+static inline u32 cprman_read(struct bcm2835_cprman *cprman, u32 reg)
+{
+ return readl(cprman->regs + reg);
+}
+
+/*
+ * These are fixed clocks. They're probably not all root clocks and it may
+ * be possible to turn them on and off but until this is mapped out better
+ * it's the only way they can be used.
+ */
+void __init bcm2835_init_clocks(void)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
+ 126000000);
+ if (IS_ERR(clk))
+ pr_err("apb_pclk not registered\n");
+
+ clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, CLK_IS_ROOT,
+ 3000000);
+ if (IS_ERR(clk))
+ pr_err("uart0_pclk not registered\n");
+ ret = clk_register_clkdev(clk, NULL, "20201000.uart");
+ if (ret)
+ pr_err("uart0_pclk alias not registered\n");
+
+ clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, CLK_IS_ROOT,
+ 125000000);
+ if (IS_ERR(clk))
+ pr_err("uart1_pclk not registered\n");
+ ret = clk_register_clkdev(clk, NULL, "20215000.uart");
+ if (ret)
+ pr_err("uart1_pclk alias not registered\n");
+}
+
+struct bcm2835_pll_data {
+ const char *name;
+ u32 cm_ctrl_reg;
+ u32 a2w_ctrl_reg;
+ u32 frac_reg;
+ u32 ana_reg_base;
+ u32 reference_enable_mask;
+ /* Bit in CM_LOCK to indicate when the PLL has locked. */
+ u32 lock_mask;
+
+ const struct bcm2835_pll_ana_bits *ana;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+ /*
+ * Highest rate for the VCO before we have to use the
+ * pre-divide-by-2.
+ */
+ unsigned long max_fb_rate;
+};
+
+struct bcm2835_pll_ana_bits {
+ u32 mask0;
+ u32 set0;
+ u32 mask1;
+ u32 set1;
+ u32 mask3;
+ u32 set3;
+ u32 fb_prediv_mask;
+};
+
+static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
+ .mask0 = 0,
+ .set0 = 0,
+ .mask1 = ~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
+ .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
+ .mask3 = ~A2W_PLL_KA_MASK,
+ .set3 = (2 << A2W_PLL_KA_SHIFT),
+ .fb_prediv_mask = BIT(14),
+};
+
+static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
+ .mask0 = ~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
+ .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
+ .mask1 = ~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
+ .set1 = (6 << A2W_PLLH_KP_SHIFT),
+ .mask3 = 0,
+ .set3 = 0,
+ .fb_prediv_mask = BIT(11),
+};
+
+/*
+ * PLLA is the auxiliary PLL, used to drive the CCP2 (Compact Camera
+ * Port 2) transmitter clock.
+ *
+ * It is in the PX LDO power domain, which is on when the AUDIO domain
+ * is on.
+ */
+static const struct bcm2835_pll_data bcm2835_plla_data = {
+ .name = "plla",
+ .cm_ctrl_reg = CM_PLLA,
+ .a2w_ctrl_reg = A2W_PLLA_CTRL,
+ .frac_reg = A2W_PLLA_FRAC,
+ .ana_reg_base = A2W_PLLA_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLA_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKA,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 2400000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE,
+};
+
+/* PLLB is used for the ARM's clock. */
+static const struct bcm2835_pll_data bcm2835_pllb_data = {
+ .name = "pllb",
+ .cm_ctrl_reg = CM_PLLB,
+ .a2w_ctrl_reg = A2W_PLLB_CTRL,
+ .frac_reg = A2W_PLLB_FRAC,
+ .ana_reg_base = A2W_PLLB_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLB_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKB,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE,
+};
+
+/*
+ * PLLC is the core PLL, used to drive the core VPU clock.
+ *
+ * It is in the PX LDO power domain, which is on when the AUDIO domain
+ * is on.
+ */
+static const struct bcm2835_pll_data bcm2835_pllc_data = {
+ .name = "pllc",
+ .cm_ctrl_reg = CM_PLLC,
+ .a2w_ctrl_reg = A2W_PLLC_CTRL,
+ .frac_reg = A2W_PLLC_FRAC,
+ .ana_reg_base = A2W_PLLC_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKC,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE,
+};
+
+/*
+ * PLLD is the display PLL, used to drive DSI display panels.
+ *
+ * It is in the PX LDO power domain, which is on when the AUDIO domain
+ * is on.
+ */
+static const struct bcm2835_pll_data bcm2835_plld_data = {
+ .name = "plld",
+ .cm_ctrl_reg = CM_PLLD,
+ .a2w_ctrl_reg = A2W_PLLD_CTRL,
+ .frac_reg = A2W_PLLD_FRAC,
+ .ana_reg_base = A2W_PLLD_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_DDR_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKD,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 2400000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE,
+};
+
+/*
+ * PLLH is used to supply the pixel clock or the AUX clock for the TV
+ * encoder.
+ *
+ * It is in the HDMI power domain.
+ */
+static const struct bcm2835_pll_data bcm2835_pllh_data = {
+ "pllh",
+ .cm_ctrl_reg = CM_PLLH,
+ .a2w_ctrl_reg = A2W_PLLH_CTRL,
+ .frac_reg = A2W_PLLH_FRAC,
+ .ana_reg_base = A2W_PLLH_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKH,
+
+ .ana = &bcm2835_ana_pllh,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE,
+};
+
+struct bcm2835_pll_divider_data {
+ const char *name;
+ const struct bcm2835_pll_data *source_pll;
+ u32 cm_reg;
+ u32 a2w_reg;
+
+ u32 load_mask;
+ u32 hold_mask;
+ u32 fixed_divider;
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_plla_core_data = {
+ .name = "plla_core",
+ .source_pll = &bcm2835_plla_data,
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_CORE,
+ .load_mask = CM_PLLA_LOADCORE,
+ .hold_mask = CM_PLLA_HOLDCORE,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_plla_per_data = {
+ .name = "plla_per",
+ .source_pll = &bcm2835_plla_data,
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_PER,
+ .load_mask = CM_PLLA_LOADPER,
+ .hold_mask = CM_PLLA_HOLDPER,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllb_arm_data = {
+ .name = "pllb_arm",
+ .source_pll = &bcm2835_pllb_data,
+ .cm_reg = CM_PLLB,
+ .a2w_reg = A2W_PLLB_ARM,
+ .load_mask = CM_PLLB_LOADARM,
+ .hold_mask = CM_PLLB_HOLDARM,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllc_core0_data = {
+ .name = "pllc_core0",
+ .source_pll = &bcm2835_pllc_data,
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE0,
+ .load_mask = CM_PLLC_LOADCORE0,
+ .hold_mask = CM_PLLC_HOLDCORE0,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllc_core1_data = {
+ .name = "pllc_core1", .source_pll = &bcm2835_pllc_data,
+ .cm_reg = CM_PLLC, A2W_PLLC_CORE1,
+ .load_mask = CM_PLLC_LOADCORE1,
+ .hold_mask = CM_PLLC_HOLDCORE1,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllc_core2_data = {
+ .name = "pllc_core2",
+ .source_pll = &bcm2835_pllc_data,
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE2,
+ .load_mask = CM_PLLC_LOADCORE2,
+ .hold_mask = CM_PLLC_HOLDCORE2,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllc_per_data = {
+ .name = "pllc_per",
+ .source_pll = &bcm2835_pllc_data,
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_PER,
+ .load_mask = CM_PLLC_LOADPER,
+ .hold_mask = CM_PLLC_HOLDPER,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_plld_core_data = {
+ .name = "plld_core",
+ .source_pll = &bcm2835_plld_data,
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_CORE,
+ .load_mask = CM_PLLD_LOADCORE,
+ .hold_mask = CM_PLLD_HOLDCORE,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_plld_per_data = {
+ .name = "plld_per",
+ .source_pll = &bcm2835_plld_data,
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_PER,
+ .load_mask = CM_PLLD_LOADPER,
+ .hold_mask = CM_PLLD_HOLDPER,
+ .fixed_divider = 1,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllh_rcal_data = {
+ .name = "pllh_rcal",
+ .source_pll = &bcm2835_pllh_data,
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_RCAL,
+ .load_mask = CM_PLLH_LOADRCAL,
+ .hold_mask = 0,
+ .fixed_divider = 10,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllh_aux_data = {
+ .name = "pllh_aux",
+ .source_pll = &bcm2835_pllh_data,
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_AUX,
+ .load_mask = CM_PLLH_LOADAUX,
+ .hold_mask = 0,
+ .fixed_divider = 10,
+};
+
+static const struct bcm2835_pll_divider_data bcm2835_pllh_pix_data = {
+ .name = "pllh_pix",
+ .source_pll = &bcm2835_pllh_data,
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_PIX,
+ .load_mask = CM_PLLH_LOADPIX,
+ .hold_mask = 0,
+ .fixed_divider = 10,
+};
+
+struct bcm2835_clock_data {
+ const char *name;
+
+ const char *const *parents;
+ int num_mux_parents;
+
+ u32 ctl_reg;
+ u32 div_reg;
+
+ /* Number of integer bits in the divider */
+ u32 int_bits;
+ /* Number of fractional bits in the divider */
+ u32 frac_bits;
+
+ bool is_vpu_clock;
+};
+
+static const char *const bcm2835_clock_per_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "plla_per",
+ "pllc_per",
+ "plld_per",
+ "pllh_aux",
+};
+
+static const char *const bcm2835_clock_vpu_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "plla_core",
+ "pllc_core0",
+ "plld_core",
+ "pllh_aux",
+ "pllc_core1",
+ "pllc_core2",
+};
+
+static const char *const bcm2835_clock_osc_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1"
+};
+
+/*
+ * Used for a 1 MHz clock for the system clocksource, and also used by
+ * the watchdog timer and the camera pulse generator.
+ */
+static const struct bcm2835_clock_data bcm2835_clock_timer_data = {
+ .name = "timer",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents),
+ .parents = bcm2835_clock_osc_parents,
+ .ctl_reg = CM_TIMERCTL,
+ .div_reg = CM_TIMERDIV,
+ .int_bits = 6,
+ .frac_bits = 12,
+};
+
+/* One Time Programmable Memory clock. Maximum 10 MHz. */
+static const struct bcm2835_clock_data bcm2835_clock_otp_data = {
+ .name = "otp",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents),
+ .parents = bcm2835_clock_osc_parents,
+ .ctl_reg = CM_OTPCTL,
+ .div_reg = CM_OTPDIV,
+ .int_bits = 4,
+ .frac_bits = 0,
+};
+
+/*
+ * VPU clock. This doesn't have an enable bit, since it drives the
+ * bus for everything else, and is special so it doesn't need to be
+ * gated for rate changes. It is also known as "clk_audio" in various
+ * hardware documentation.
+ */
+static const struct bcm2835_clock_data bcm2835_clock_vpu_data = {
+ .name = "vpu",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
+ .parents = bcm2835_clock_vpu_parents,
+ .ctl_reg = CM_VPUCTL,
+ .div_reg = CM_VPUDIV,
+ .int_bits = 12,
+ .frac_bits = 8,
+ .is_vpu_clock = true,
+};
+
+static const struct bcm2835_clock_data bcm2835_clock_v3d_data = {
+ .name = "v3d",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
+ .parents = bcm2835_clock_vpu_parents,
+ .ctl_reg = CM_V3DCTL,
+ .div_reg = CM_V3DDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+};
+
+static const struct bcm2835_clock_data bcm2835_clock_isp_data = {
+ .name = "isp",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
+ .parents = bcm2835_clock_vpu_parents,
+ .ctl_reg = CM_ISPCTL,
+ .div_reg = CM_ISPDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+};
+
+static const struct bcm2835_clock_data bcm2835_clock_h264_data = {
+ .name = "h264",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
+ .parents = bcm2835_clock_vpu_parents,
+ .ctl_reg = CM_H264CTL,
+ .div_reg = CM_H264DIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+};
+
+/* TV encoder clock. Its only operating frequency is 108 MHz. */
+static const struct bcm2835_clock_data bcm2835_clock_vec_data = {
+ .name = "vec",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
+ .parents = bcm2835_clock_per_parents,
+ .ctl_reg = CM_VECCTL,
+ .div_reg = CM_VECDIV,
+ .int_bits = 4,
+ .frac_bits = 0,
+};
+
+static const struct bcm2835_clock_data bcm2835_clock_uart_data = {
+ .name = "uart",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
+ .parents = bcm2835_clock_per_parents,
+ .ctl_reg = CM_UARTCTL,
+ .div_reg = CM_UARTDIV,
+ .int_bits = 10,
+ .frac_bits = 12,
+};
+
+/* HDMI state machine */
+static const struct bcm2835_clock_data bcm2835_clock_hsm_data = {
+ .name = "hsm",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
+ .parents = bcm2835_clock_per_parents,
+ .ctl_reg = CM_HSMCTL,
+ .div_reg = CM_HSMDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+};
+
+/*
+ * Secondary SDRAM clock. Used for low-voltage modes when the PLL in
+ * the SDRAM controller can't be used.
+ */
+static const struct bcm2835_clock_data bcm2835_clock_sdram_data = {
+ .name = "sdram",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
+ .parents = bcm2835_clock_vpu_parents,
+ .ctl_reg = CM_SDCCTL,
+ .div_reg = CM_SDCDIV,
+ .int_bits = 6,
+ .frac_bits = 0,
+};
+
+/* Clock for the temperature sensor. Generally run at 2 MHz, max 5 MHz. */
+static const struct bcm2835_clock_data bcm2835_clock_tsens_data = {
+ .name = "tsens",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents),
+ .parents = bcm2835_clock_osc_parents,
+ .ctl_reg = CM_TSENSCTL,
+ .div_reg = CM_TSENSDIV,
+ .int_bits = 5,
+ .frac_bits = 0,
+};
+
+/* Arasan EMMC clock */
+static const struct bcm2835_clock_data bcm2835_clock_emmc_data = {
+ .name = "emmc",
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
+ .parents = bcm2835_clock_per_parents,
+ .ctl_reg = CM_EMMCCTL,
+ .div_reg = CM_EMMCDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+};
+
+struct bcm2835_pll {
+ struct clk_hw hw;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_pll_data *data;
+};
+
+static int bcm2835_pll_is_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+
+ return cprman_read(cprman, data->a2w_ctrl_reg) &
+ A2W_PLL_CTRL_PRST_DISABLE;
+}
+
+static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
+ unsigned long parent_rate,
+ u32 *ndiv, u32 *fdiv)
+{
+ u64 div;
+
+ div = (u64)rate << A2W_PLL_FRAC_BITS;
+ do_div(div, parent_rate);
+
+ *ndiv = div >> A2W_PLL_FRAC_BITS;
+ *fdiv = div & ((1 << A2W_PLL_FRAC_BITS) - 1);
+}
+
+static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
+ u32 ndiv, u32 fdiv, u32 pdiv)
+{
+ u64 rate;
+
+ if (pdiv == 0)
+ return 0;
+
+ rate = (u64)parent_rate * ((ndiv << A2W_PLL_FRAC_BITS) + fdiv);
+ do_div(rate, pdiv);
+ return rate >> A2W_PLL_FRAC_BITS;
+}
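+
+/*
+ * Worked example (purely illustrative; the reference clock comes from
+ * the device tree): with the usual 19.2 MHz crystal as parent, a
+ * request for 1920000000 Hz is an exact ratio of 100, so
+ * bcm2835_pll_choose_ndiv_and_fdiv() yields ndiv = 100 and fdiv = 0,
+ * and bcm2835_pll_rate_from_divisors(19200000, 100, 0, 1) reproduces
+ * 1920000000. A non-integer ratio leaves the remainder in fdiv as an
+ * A2W_PLL_FRAC_BITS-bit fraction of the reference.
+ */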
+
+static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 ndiv, fdiv;
+
+ bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
+
+ return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
+}
+
+static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ u32 a2wctrl = cprman_read(cprman, data->a2w_ctrl_reg);
+ u32 ndiv, pdiv, fdiv;
+ bool using_prediv;
+
+ if (parent_rate == 0)
+ return 0;
+
+ fdiv = cprman_read(cprman, data->frac_reg) & A2W_PLL_FRAC_MASK;
+ ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
+ pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
+ using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
+ data->ana->fb_prediv_mask;
+
+ if (using_prediv)
+ ndiv *= 2;
+
+ return bcm2835_pll_rate_from_divisors(parent_rate, ndiv, fdiv, pdiv);
+}
+
+static void bcm2835_pll_off(struct clk_hw *hw)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
+ cprman_write(cprman, data->a2w_ctrl_reg, A2W_PLL_CTRL_PWRDN);
+}
+
+static int bcm2835_pll_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ ktime_t timeout;
+
+ /* Take the PLL out of reset. */
+ cprman_write(cprman, data->cm_ctrl_reg,
+ cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+
+ /* Wait for the PLL to lock. */
+ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
+ while (!(cprman_read(cprman, CM_LOCK) & data->lock_mask)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(cprman->dev, "%s: couldn't lock PLL\n",
+ clk_hw_get_name(hw));
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static void
+bcm2835_pll_write_ana(struct bcm2835_cprman *cprman, u32 ana_reg_base, u32 *ana)
+{
+ int i;
+
+ /*
+ * ANA register setup is done as a series of writes to
+ * ANA3-ANA0, in that order. This lets us write all 4
+ * registers as a single cycle of the serdes interface (taking
+ * 100 xosc clocks), whereas if we were to update ana0, 1, and
+ * 3 individually through their partial-write registers, each
+ * would be their own serdes cycle.
+ */
+ for (i = 3; i >= 0; i--)
+ cprman_write(cprman, ana_reg_base + i * 4, ana[i]);
+}
+
+static int bcm2835_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
+ u32 ndiv, fdiv, a2w_ctl;
+ u32 ana[4];
+ int i;
+
+ if (rate < data->min_rate || rate > data->max_rate) {
+ dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
+ clk_hw_get_name(hw), rate,
+ data->min_rate, data->max_rate);
+ return -EINVAL;
+ }
+
+ if (rate > data->max_fb_rate) {
+ use_fb_prediv = true;
+ rate /= 2;
+ } else {
+ use_fb_prediv = false;
+ }
+
+ bcm2835_pll_choose_ndiv_and_fdiv(rate, parent_rate, &ndiv, &fdiv);
+
+ for (i = 3; i >= 0; i--)
+ ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
+
+ was_using_prediv = ana[1] & data->ana->fb_prediv_mask;
+
+ ana[0] &= ~data->ana->mask0;
+ ana[0] |= data->ana->set0;
+ ana[1] &= ~data->ana->mask1;
+ ana[1] |= data->ana->set1;
+ ana[3] &= ~data->ana->mask3;
+ ana[3] |= data->ana->set3;
+
+ if (was_using_prediv && !use_fb_prediv) {
+ ana[1] &= ~data->ana->fb_prediv_mask;
+ do_ana_setup_first = true;
+ } else if (!was_using_prediv && use_fb_prediv) {
+ ana[1] |= data->ana->fb_prediv_mask;
+ do_ana_setup_first = false;
+ } else {
+ do_ana_setup_first = true;
+ }
+
+ /* Unmask the reference clock from the oscillator. */
+ cprman_write(cprman, A2W_XOSC_CTRL,
+ cprman_read(cprman, A2W_XOSC_CTRL) |
+ data->reference_enable_mask);
+
+ if (do_ana_setup_first)
+ bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
+
+ /* Set the PLL multiplier from the oscillator. */
+ cprman_write(cprman, data->frac_reg, fdiv);
+
+ a2w_ctl = cprman_read(cprman, data->a2w_ctrl_reg);
+ a2w_ctl &= ~A2W_PLL_CTRL_NDIV_MASK;
+ a2w_ctl |= ndiv << A2W_PLL_CTRL_NDIV_SHIFT;
+ a2w_ctl &= ~A2W_PLL_CTRL_PDIV_MASK;
+ a2w_ctl |= 1 << A2W_PLL_CTRL_PDIV_SHIFT;
+ cprman_write(cprman, data->a2w_ctrl_reg, a2w_ctl);
+
+ if (!do_ana_setup_first)
+ bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
+
+ return 0;
+}
+
+static const struct clk_ops bcm2835_pll_clk_ops = {
+ .is_prepared = bcm2835_pll_is_on,
+ .prepare = bcm2835_pll_on,
+ .unprepare = bcm2835_pll_off,
+ .recalc_rate = bcm2835_pll_get_rate,
+ .set_rate = bcm2835_pll_set_rate,
+ .round_rate = bcm2835_pll_round_rate,
+};
+
+struct bcm2835_pll_divider {
+ struct clk_divider div;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_pll_divider_data *data;
+};
+
+static struct bcm2835_pll_divider *
+bcm2835_pll_divider_from_hw(struct clk_hw *hw)
+{
+ return container_of(hw, struct bcm2835_pll_divider, div.hw);
+}
+
+static int bcm2835_pll_divider_is_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+
+ return !(cprman_read(cprman, data->a2w_reg) & A2W_PLL_CHANNEL_DISABLE);
+}
+
+static long bcm2835_pll_divider_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return clk_divider_ops.round_rate(hw, rate, parent_rate);
+}
+
+static unsigned long bcm2835_pll_divider_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+ u32 div = cprman_read(cprman, data->a2w_reg);
+
+ div &= (1 << A2W_PLL_DIV_BITS) - 1;
+ if (div == 0)
+ div = 256;
+
+ return parent_rate / div;
+}
+
+static void bcm2835_pll_divider_off(struct clk_hw *hw)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+
+ cprman_write(cprman, data->cm_reg,
+ (cprman_read(cprman, data->cm_reg) &
+ ~data->load_mask) | data->hold_mask);
+ cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
+}
+
+static int bcm2835_pll_divider_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+
+ cprman_write(cprman, data->a2w_reg,
+ cprman_read(cprman, data->a2w_reg) &
+ ~A2W_PLL_CHANNEL_DISABLE);
+
+ cprman_write(cprman, data->cm_reg,
+ cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
+
+ return 0;
+}
+
+static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+ u32 cm;
+ int ret;
+
+ ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
+ if (ret)
+ return ret;
+
+ cm = cprman_read(cprman, data->cm_reg);
+ cprman_write(cprman, data->cm_reg, cm | data->load_mask);
+ cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
+
+ return 0;
+}
+
+static const struct clk_ops bcm2835_pll_divider_clk_ops = {
+ .is_prepared = bcm2835_pll_divider_is_on,
+ .prepare = bcm2835_pll_divider_on,
+ .unprepare = bcm2835_pll_divider_off,
+ .recalc_rate = bcm2835_pll_divider_get_rate,
+ .set_rate = bcm2835_pll_divider_set_rate,
+ .round_rate = bcm2835_pll_divider_round_rate,
+};
+
+/*
+ * The CM dividers do fixed-point division, so we can't use the
+ * generic integer divider code like the PLL dividers do (and we can't
+ * fake it by having some fixed shifts preceding it in the clock tree,
+ * because we'd run out of bits in a 32-bit unsigned long).
+ */
+struct bcm2835_clock {
+ struct clk_hw hw;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_clock_data *data;
+};
+
+static struct bcm2835_clock *bcm2835_clock_from_hw(struct clk_hw *hw)
+{
+ return container_of(hw, struct bcm2835_clock, hw);
+}
+
+static int bcm2835_clock_is_on(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ return (cprman_read(cprman, data->ctl_reg) & CM_ENABLE) != 0;
+}
+
+static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ const struct bcm2835_clock_data *data = clock->data;
+ u32 unused_frac_mask = GENMASK(CM_DIV_FRAC_BITS - data->frac_bits, 0);
+ u64 temp = (u64)parent_rate << CM_DIV_FRAC_BITS;
+ u32 div;
+
+ do_div(temp, rate);
+ div = temp;
+
+ /* Round and mask off the unused bits */
+ if (unused_frac_mask != 0) {
+ div += unused_frac_mask >> 1;
+ div &= ~unused_frac_mask;
+ }
+
+ /* Clamp to the limits. */
+ div = max(div, unused_frac_mask + 1);
+ div = min_t(u32, div, GENMASK(data->int_bits + CM_DIV_FRAC_BITS - 1,
+ CM_DIV_FRAC_BITS - data->frac_bits));
+
+ return div;
+}
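+
+/*
+ * Illustrative numbers (hypothetical rates, not taken from any
+ * datasheet): for a 500000000 Hz parent and a requested 100000000 Hz
+ * the ratio is exactly 5, so the 12.12 fixed-point divider computed
+ * above is 5 << 12 = 0x5000. For clocks with fewer fractional bits,
+ * the rounding and clamping steps then trim the unused low bits.
+ */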
+
+static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
+ unsigned long parent_rate,
+ u32 div)
+{
+ const struct bcm2835_clock_data *data = clock->data;
+ u64 temp;
+
+ /*
+ * The divisor is a 12.12 fixed point field, but only some of
+ * the bits are populated in any given clock.
+ */
+ div >>= CM_DIV_FRAC_BITS - data->frac_bits;
+ div &= (1 << (data->int_bits + data->frac_bits)) - 1;
+
+ if (div == 0)
+ return 0;
+
+ temp = (u64)parent_rate << data->frac_bits;
+
+ do_div(temp, div);
+
+ return temp;
+}
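+
+/*
+ * Continuing the example above for a clock with int_bits = 4 and
+ * frac_bits = 8 (e.g. "emmc"): a register value of 0x5000 is shifted
+ * down to 0x500 (5.0 in 4.8 format), and (500000000 << 8) / 0x500
+ * recovers the 100000000 Hz target.
+ */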
+
+static long bcm2835_clock_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ u32 div = bcm2835_clock_choose_div(hw, rate, *parent_rate);
+
+ return bcm2835_clock_rate_from_divisor(clock, *parent_rate, div);
+}
+
+static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ u32 div = cprman_read(cprman, data->div_reg);
+
+ return bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
+}
+
+static void bcm2835_clock_wait_busy(struct bcm2835_clock *clock)
+{
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ ktime_t timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
+
+ while (cprman_read(cprman, data->ctl_reg) & CM_BUSY) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(cprman->dev, "%s: busy wait timed out\n",
+ clk_hw_get_name(&clock->hw));
+ return;
+ }
+ cpu_relax();
+ }
+}
+
+static void bcm2835_clock_off(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->ctl_reg,
+ cprman_read(cprman, data->ctl_reg) & ~CM_ENABLE);
+ spin_unlock(&cprman->regs_lock);
+
+ /* BUSY will remain high until the divider completes its cycle. */
+ bcm2835_clock_wait_busy(clock);
+}
+
+static int bcm2835_clock_on(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->ctl_reg,
+ cprman_read(cprman, data->ctl_reg) |
+ CM_ENABLE |
+ CM_GATE);
+ spin_unlock(&cprman->regs_lock);
+
+ return 0;
+}
+
+static int bcm2835_clock_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate);
+
+ cprman_write(cprman, data->div_reg, div);
+
+ return 0;
+}
+
+static const struct clk_ops bcm2835_clock_clk_ops = {
+ .is_prepared = bcm2835_clock_is_on,
+ .prepare = bcm2835_clock_on,
+ .unprepare = bcm2835_clock_off,
+ .recalc_rate = bcm2835_clock_get_rate,
+ .set_rate = bcm2835_clock_set_rate,
+ .round_rate = bcm2835_clock_round_rate,
+};
+
+static int bcm2835_vpu_clock_is_on(struct clk_hw *hw)
+{
+ return true;
+}
+
+/*
+ * The VPU clock can never be disabled (it doesn't have an ENABLE
+ * bit), so it gets its own set of clock ops.
+ */
+static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
+ .is_prepared = bcm2835_vpu_clock_is_on,
+ .recalc_rate = bcm2835_clock_get_rate,
+ .set_rate = bcm2835_clock_set_rate,
+ .round_rate = bcm2835_clock_round_rate,
+};
+
+static struct clk *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+ const struct bcm2835_pll_data *data)
+{
+ struct bcm2835_pll *pll;
+ struct clk_init_data init;
+
+ memset(&init, 0, sizeof(init));
+
+ /* All of the PLLs derive from the external oscillator. */
+ init.parent_names = &cprman->osc_name;
+ init.num_parents = 1;
+ init.name = data->name;
+ init.ops = &bcm2835_pll_clk_ops;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return NULL;
+
+ pll->cprman = cprman;
+ pll->data = data;
+ pll->hw.init = &init;
+
+ return devm_clk_register(cprman->dev, &pll->hw);
+}
+
+static struct clk *
+bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
+ const struct bcm2835_pll_divider_data *data)
+{
+ struct bcm2835_pll_divider *divider;
+ struct clk_init_data init;
+ struct clk *clk;
+ const char *divider_name;
+
+ if (data->fixed_divider != 1) {
+ divider_name = devm_kasprintf(cprman->dev, GFP_KERNEL,
+ "%s_prediv", data->name);
+ if (!divider_name)
+ return NULL;
+ } else {
+ divider_name = data->name;
+ }
+
+ memset(&init, 0, sizeof(init));
+
+ init.parent_names = &data->source_pll->name;
+ init.num_parents = 1;
+ init.name = divider_name;
+ init.ops = &bcm2835_pll_divider_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;
+
+ divider = devm_kzalloc(cprman->dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return NULL;
+
+ divider->div.reg = cprman->regs + data->a2w_reg;
+ divider->div.shift = A2W_PLL_DIV_SHIFT;
+ divider->div.width = A2W_PLL_DIV_BITS;
+ divider->div.flags = 0;
+ divider->div.lock = &cprman->regs_lock;
+ divider->div.hw.init = &init;
+ divider->div.table = NULL;
+
+ divider->cprman = cprman;
+ divider->data = data;
+
+ clk = devm_clk_register(cprman->dev, &divider->div.hw);
+ if (IS_ERR(clk))
+ return clk;
+
+ /*
+ * PLLH's channels have a fixed divide by 10 afterwards, which
+ * is what our consumers are actually using.
+ */
+ if (data->fixed_divider != 1) {
+ return clk_register_fixed_factor(cprman->dev, data->name,
+ divider_name,
+ CLK_SET_RATE_PARENT,
+ 1,
+ data->fixed_divider);
+ }
+
+ return clk;
+}
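+
+/*
+ * For a PLLH channel such as "pllh_pix" (fixed_divider = 10), the code
+ * above therefore registers the programmable A2W divider as
+ * "pllh_pix_prediv" and then a fixed-factor divide-by-10 named
+ * "pllh_pix", which is the clock consumers actually see.
+ */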
+
+static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
+ const struct bcm2835_clock_data *data)
+{
+ struct bcm2835_clock *clock;
+ struct clk_init_data init;
+ const char *parent;
+
+ /*
+ * Most of the clock generators have a mux field, so we
+ * instantiate a generic mux as our parent to handle it.
+ */
+ if (data->num_mux_parents) {
+ const char *parents[1 << CM_SRC_BITS];
+ int i;
+
+ parent = devm_kasprintf(cprman->dev, GFP_KERNEL,
+ "mux_%s", data->name);
+ if (!parent)
+ return NULL;
+
+ /*
+ * Replace our "xosc" references with the oscillator's
+ * actual name.
+ */
+ for (i = 0; i < data->num_mux_parents; i++) {
+ if (strcmp(data->parents[i], "xosc") == 0)
+ parents[i] = cprman->osc_name;
+ else
+ parents[i] = data->parents[i];
+ }
+
+ clk_register_mux(cprman->dev, parent,
+ parents, data->num_mux_parents,
+ CLK_SET_RATE_PARENT,
+ cprman->regs + data->ctl_reg,
+ CM_SRC_SHIFT, CM_SRC_BITS,
+ 0, &cprman->regs_lock);
+ } else {
+ parent = data->parents[0];
+ }
+
+ memset(&init, 0, sizeof(init));
+ init.parent_names = &parent;
+ init.num_parents = 1;
+ init.name = data->name;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ if (data->is_vpu_clock) {
+ init.ops = &bcm2835_vpu_clock_clk_ops;
+ } else {
+ init.ops = &bcm2835_clock_clk_ops;
+ init.flags |= CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ }
+
+ clock = devm_kzalloc(cprman->dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return NULL;
+
+ clock->cprman = cprman;
+ clock->data = data;
+ clock->hw.init = &init;
+
+ return devm_clk_register(cprman->dev, &clock->hw);
+}
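+
+/*
+ * As an example of the mux handling above, the "vpu" clock ends up
+ * with a generic mux named "mux_vpu" as its only listed parent,
+ * selecting between the ten bcm2835_clock_vpu_parents entries, with
+ * the "xosc" slot replaced by whatever oscillator name the device
+ * tree provides.
+ */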
+
+static int bcm2835_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk **clks;
+ struct bcm2835_cprman *cprman;
+ struct resource *res;
+
+ cprman = devm_kzalloc(dev, sizeof(*cprman), GFP_KERNEL);
+ if (!cprman)
+ return -ENOMEM;
+
+ spin_lock_init(&cprman->regs_lock);
+ cprman->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cprman->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cprman->regs))
+ return PTR_ERR(cprman->regs);
+
+ cprman->osc_name = of_clk_get_parent_name(dev->of_node, 0);
+ if (!cprman->osc_name)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, cprman);
+
+ cprman->onecell.clk_num = BCM2835_CLOCK_COUNT;
+ cprman->onecell.clks = cprman->clks;
+ clks = cprman->clks;
+
+ clks[BCM2835_PLLA] = bcm2835_register_pll(cprman, &bcm2835_plla_data);
+ clks[BCM2835_PLLB] = bcm2835_register_pll(cprman, &bcm2835_pllb_data);
+ clks[BCM2835_PLLC] = bcm2835_register_pll(cprman, &bcm2835_pllc_data);
+ clks[BCM2835_PLLD] = bcm2835_register_pll(cprman, &bcm2835_plld_data);
+ clks[BCM2835_PLLH] = bcm2835_register_pll(cprman, &bcm2835_pllh_data);
+
+ clks[BCM2835_PLLA_CORE] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_plla_core_data);
+ clks[BCM2835_PLLA_PER] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_plla_per_data);
+ clks[BCM2835_PLLC_CORE0] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllc_core0_data);
+ clks[BCM2835_PLLC_CORE1] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllc_core1_data);
+ clks[BCM2835_PLLC_CORE2] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllc_core2_data);
+ clks[BCM2835_PLLC_PER] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllc_per_data);
+ clks[BCM2835_PLLD_CORE] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_plld_core_data);
+ clks[BCM2835_PLLD_PER] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_plld_per_data);
+ clks[BCM2835_PLLH_RCAL] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllh_rcal_data);
+ clks[BCM2835_PLLH_AUX] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllh_aux_data);
+ clks[BCM2835_PLLH_PIX] =
+ bcm2835_register_pll_divider(cprman, &bcm2835_pllh_pix_data);
+
+ clks[BCM2835_CLOCK_TIMER] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_timer_data);
+ clks[BCM2835_CLOCK_OTP] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_otp_data);
+ clks[BCM2835_CLOCK_TSENS] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_tsens_data);
+ clks[BCM2835_CLOCK_VPU] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_vpu_data);
+ clks[BCM2835_CLOCK_V3D] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_v3d_data);
+ clks[BCM2835_CLOCK_ISP] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_isp_data);
+ clks[BCM2835_CLOCK_H264] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_h264_data);
+ clks[BCM2835_CLOCK_SDRAM] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_sdram_data);
+ clks[BCM2835_CLOCK_UART] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_uart_data);
+ clks[BCM2835_CLOCK_VEC] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_vec_data);
+ clks[BCM2835_CLOCK_HSM] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_hsm_data);
+ clks[BCM2835_CLOCK_EMMC] =
+ bcm2835_register_clock(cprman, &bcm2835_clock_emmc_data);
+
+ /*
+ * CM_PERIICTL (and CM_PERIACTL, CM_SYSCTL and CM_VPUCTL if
+ * you have the debug bit set in the power manager, which we
+ * don't bother exposing) are individual gates off of the
+ * non-stop vpu clock.
+ */
+ clks[BCM2835_CLOCK_PERI_IMAGE] =
+ clk_register_gate(dev, "peri_image", "vpu",
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ cprman->regs + CM_PERIICTL, CM_GATE_BIT,
+ 0, &cprman->regs_lock);
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
+ &cprman->onecell);
+}
+
+static const struct of_device_id bcm2835_clk_of_match[] = {
+ { .compatible = "brcm,bcm2835-cprman", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_clk_of_match);
+
+static struct platform_driver bcm2835_clk_driver = {
+ .driver = {
+ .name = "bcm2835-clk",
+ .of_match_table = bcm2835_clk_of_match,
+ },
+ .probe = bcm2835_clk_probe,
+};
+
+builtin_platform_driver(bcm2835_clk_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("BCM2835 clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c
index 316c60337661..3a228b6d4fee 100644
--- a/drivers/clk/bcm/clk-cygnus.c
+++ b/drivers/clk/bcm/clk-cygnus.c
@@ -23,28 +23,30 @@
#include <dt-bindings/clock/bcm-cygnus.h>
#include "clk-iproc.h"
-#define reg_val(o, s, w) { .offset = o, .shift = s, .width = w, }
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
-#define aon_val(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
.pwr_shift = ps, .iso_shift = is }
-#define sw_ctrl_val(o, s) { .offset = o, .shift = s, }
+#define SW_CTRL_VAL(o, s) { .offset = o, .shift = s, }
-#define asiu_div_val(o, es, hs, hw, ls, lw) \
+#define ASIU_DIV_VAL(o, es, hs, hw, ls, lw) \
{ .offset = o, .en_shift = es, .high_shift = hs, \
.high_width = hw, .low_shift = ls, .low_width = lw }
-#define reset_val(o, rs, prs, kis, kiw, kps, kpw, kas, kaw) { .offset = o, \
- .reset_shift = rs, .p_reset_shift = prs, .ki_shift = kis, \
- .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, .ki_shift = kis,\
+ .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
.ka_width = kaw }
-#define vco_ctrl_val(uo, lo) { .u_offset = uo, .l_offset = lo }
+#define VCO_CTRL_VAL(uo, lo) { .u_offset = uo, .l_offset = lo }
-#define enable_val(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
.hold_shift = hs, .bypass_shift = bs }
-#define asiu_gate_val(o, es) { .offset = o, .en_shift = es }
+#define ASIU_GATE_VAL(o, es) { .offset = o, .en_shift = es }
static void __init cygnus_armpll_init(struct device_node *node)
{
@@ -55,52 +57,53 @@ CLK_OF_DECLARE(cygnus_armpll, "brcm,cygnus-armpll", cygnus_armpll_init);
static const struct iproc_pll_ctrl genpll = {
.flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
IPROC_CLK_PLL_NEEDS_SW_CFG,
- .aon = aon_val(0x0, 2, 1, 0),
- .reset = reset_val(0x0, 11, 10, 4, 3, 0, 4, 7, 3),
- .sw_ctrl = sw_ctrl_val(0x10, 31),
- .ndiv_int = reg_val(0x10, 20, 10),
- .ndiv_frac = reg_val(0x10, 0, 20),
- .pdiv = reg_val(0x14, 0, 4),
- .vco_ctrl = vco_ctrl_val(0x18, 0x1c),
- .status = reg_val(0x28, 12, 1),
+ .aon = AON_VAL(0x0, 2, 1, 0),
+ .reset = RESET_VAL(0x0, 11, 10),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x18, 0x1c),
+ .status = REG_VAL(0x28, 12, 1),
};
static const struct iproc_clk_ctrl genpll_clk[] = {
[BCM_CYGNUS_GENPLL_AXI21_CLK] = {
.channel = BCM_CYGNUS_GENPLL_AXI21_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x4, 6, 0, 12),
- .mdiv = reg_val(0x20, 0, 8),
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x20, 0, 8),
},
[BCM_CYGNUS_GENPLL_250MHZ_CLK] = {
.channel = BCM_CYGNUS_GENPLL_250MHZ_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x4, 7, 1, 13),
- .mdiv = reg_val(0x20, 10, 8),
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x20, 10, 8),
},
[BCM_CYGNUS_GENPLL_IHOST_SYS_CLK] = {
.channel = BCM_CYGNUS_GENPLL_IHOST_SYS_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x4, 8, 2, 14),
- .mdiv = reg_val(0x20, 20, 8),
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x20, 20, 8),
},
[BCM_CYGNUS_GENPLL_ENET_SW_CLK] = {
.channel = BCM_CYGNUS_GENPLL_ENET_SW_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x4, 9, 3, 15),
- .mdiv = reg_val(0x24, 0, 8),
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x24, 0, 8),
},
[BCM_CYGNUS_GENPLL_AUDIO_125_CLK] = {
.channel = BCM_CYGNUS_GENPLL_AUDIO_125_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x4, 10, 4, 16),
- .mdiv = reg_val(0x24, 10, 8),
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x24, 10, 8),
},
[BCM_CYGNUS_GENPLL_CAN_CLK] = {
.channel = BCM_CYGNUS_GENPLL_CAN_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x4, 11, 5, 17),
- .mdiv = reg_val(0x24, 20, 8),
+ .enable = ENABLE_VAL(0x4, 11, 5, 17),
+ .mdiv = REG_VAL(0x24, 20, 8),
},
};
@@ -113,51 +116,52 @@ CLK_OF_DECLARE(cygnus_genpll, "brcm,cygnus-genpll", cygnus_genpll_clk_init);
static const struct iproc_pll_ctrl lcpll0 = {
.flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
- .aon = aon_val(0x0, 2, 5, 4),
- .reset = reset_val(0x0, 31, 30, 27, 3, 23, 4, 19, 4),
- .sw_ctrl = sw_ctrl_val(0x4, 31),
- .ndiv_int = reg_val(0x4, 16, 10),
- .pdiv = reg_val(0x4, 26, 4),
- .vco_ctrl = vco_ctrl_val(0x10, 0x14),
- .status = reg_val(0x18, 12, 1),
+ .aon = AON_VAL(0x0, 2, 5, 4),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .dig_filter = DF_VAL(0x0, 27, 3, 23, 4, 19, 4),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0x14),
+ .status = REG_VAL(0x18, 12, 1),
};
static const struct iproc_clk_ctrl lcpll0_clk[] = {
[BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK] = {
.channel = BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x0, 7, 1, 13),
- .mdiv = reg_val(0x8, 0, 8),
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x8, 0, 8),
},
[BCM_CYGNUS_LCPLL0_DDR_PHY_CLK] = {
.channel = BCM_CYGNUS_LCPLL0_DDR_PHY_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x0, 8, 2, 14),
- .mdiv = reg_val(0x8, 10, 8),
+ .enable = ENABLE_VAL(0x0, 8, 2, 14),
+ .mdiv = REG_VAL(0x8, 10, 8),
},
[BCM_CYGNUS_LCPLL0_SDIO_CLK] = {
.channel = BCM_CYGNUS_LCPLL0_SDIO_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x0, 9, 3, 15),
- .mdiv = reg_val(0x8, 20, 8),
+ .enable = ENABLE_VAL(0x0, 9, 3, 15),
+ .mdiv = REG_VAL(0x8, 20, 8),
},
[BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK] = {
.channel = BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x0, 10, 4, 16),
- .mdiv = reg_val(0xc, 0, 8),
+ .enable = ENABLE_VAL(0x0, 10, 4, 16),
+ .mdiv = REG_VAL(0xc, 0, 8),
},
[BCM_CYGNUS_LCPLL0_SMART_CARD_CLK] = {
.channel = BCM_CYGNUS_LCPLL0_SMART_CARD_CLK,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x0, 11, 5, 17),
- .mdiv = reg_val(0xc, 10, 8),
+ .enable = ENABLE_VAL(0x0, 11, 5, 17),
+ .mdiv = REG_VAL(0xc, 10, 8),
},
[BCM_CYGNUS_LCPLL0_CH5_UNUSED] = {
.channel = BCM_CYGNUS_LCPLL0_CH5_UNUSED,
.flags = IPROC_CLK_AON,
- .enable = enable_val(0x0, 12, 6, 18),
- .mdiv = reg_val(0xc, 20, 8),
+ .enable = ENABLE_VAL(0x0, 12, 6, 18),
+ .mdiv = REG_VAL(0xc, 20, 8),
},
};
@@ -189,52 +193,53 @@ static const struct iproc_pll_vco_param mipipll_vco_params[] = {
static const struct iproc_pll_ctrl mipipll = {
.flags = IPROC_CLK_PLL_ASIU | IPROC_CLK_PLL_HAS_NDIV_FRAC |
IPROC_CLK_NEEDS_READ_BACK,
- .aon = aon_val(0x0, 4, 17, 16),
- .asiu = asiu_gate_val(0x0, 3),
- .reset = reset_val(0x0, 11, 10, 4, 3, 0, 4, 7, 4),
- .ndiv_int = reg_val(0x10, 20, 10),
- .ndiv_frac = reg_val(0x10, 0, 20),
- .pdiv = reg_val(0x14, 0, 4),
- .vco_ctrl = vco_ctrl_val(0x18, 0x1c),
- .status = reg_val(0x28, 12, 1),
+ .aon = AON_VAL(0x0, 4, 17, 16),
+ .asiu = ASIU_GATE_VAL(0x0, 3),
+ .reset = RESET_VAL(0x0, 11, 10),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 4),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x18, 0x1c),
+ .status = REG_VAL(0x28, 12, 1),
};
static const struct iproc_clk_ctrl mipipll_clk[] = {
[BCM_CYGNUS_MIPIPLL_CH0_UNUSED] = {
.channel = BCM_CYGNUS_MIPIPLL_CH0_UNUSED,
.flags = IPROC_CLK_NEEDS_READ_BACK,
- .enable = enable_val(0x4, 12, 6, 18),
- .mdiv = reg_val(0x20, 0, 8),
+ .enable = ENABLE_VAL(0x4, 12, 6, 18),
+ .mdiv = REG_VAL(0x20, 0, 8),
},
[BCM_CYGNUS_MIPIPLL_CH1_LCD] = {
.channel = BCM_CYGNUS_MIPIPLL_CH1_LCD,
.flags = IPROC_CLK_NEEDS_READ_BACK,
- .enable = enable_val(0x4, 13, 7, 19),
- .mdiv = reg_val(0x20, 10, 8),
+ .enable = ENABLE_VAL(0x4, 13, 7, 19),
+ .mdiv = REG_VAL(0x20, 10, 8),
},
[BCM_CYGNUS_MIPIPLL_CH2_V3D] = {
.channel = BCM_CYGNUS_MIPIPLL_CH2_V3D,
.flags = IPROC_CLK_NEEDS_READ_BACK,
- .enable = enable_val(0x4, 14, 8, 20),
- .mdiv = reg_val(0x20, 20, 8),
+ .enable = ENABLE_VAL(0x4, 14, 8, 20),
+ .mdiv = REG_VAL(0x20, 20, 8),
},
[BCM_CYGNUS_MIPIPLL_CH3_UNUSED] = {
.channel = BCM_CYGNUS_MIPIPLL_CH3_UNUSED,
.flags = IPROC_CLK_NEEDS_READ_BACK,
- .enable = enable_val(0x4, 15, 9, 21),
- .mdiv = reg_val(0x24, 0, 8),
+ .enable = ENABLE_VAL(0x4, 15, 9, 21),
+ .mdiv = REG_VAL(0x24, 0, 8),
},
[BCM_CYGNUS_MIPIPLL_CH4_UNUSED] = {
.channel = BCM_CYGNUS_MIPIPLL_CH4_UNUSED,
.flags = IPROC_CLK_NEEDS_READ_BACK,
- .enable = enable_val(0x4, 16, 10, 22),
- .mdiv = reg_val(0x24, 10, 8),
+ .enable = ENABLE_VAL(0x4, 16, 10, 22),
+ .mdiv = REG_VAL(0x24, 10, 8),
},
[BCM_CYGNUS_MIPIPLL_CH5_UNUSED] = {
.channel = BCM_CYGNUS_MIPIPLL_CH5_UNUSED,
.flags = IPROC_CLK_NEEDS_READ_BACK,
- .enable = enable_val(0x4, 17, 11, 23),
- .mdiv = reg_val(0x24, 20, 8),
+ .enable = ENABLE_VAL(0x4, 17, 11, 23),
+ .mdiv = REG_VAL(0x24, 20, 8),
},
};
@@ -247,15 +252,15 @@ static void __init cygnus_mipipll_clk_init(struct device_node *node)
CLK_OF_DECLARE(cygnus_mipipll, "brcm,cygnus-mipipll", cygnus_mipipll_clk_init);
static const struct iproc_asiu_div asiu_div[] = {
- [BCM_CYGNUS_ASIU_KEYPAD_CLK] = asiu_div_val(0x0, 31, 16, 10, 0, 10),
- [BCM_CYGNUS_ASIU_ADC_CLK] = asiu_div_val(0x4, 31, 16, 10, 0, 10),
- [BCM_CYGNUS_ASIU_PWM_CLK] = asiu_div_val(0x8, 31, 16, 10, 0, 10),
+ [BCM_CYGNUS_ASIU_KEYPAD_CLK] = ASIU_DIV_VAL(0x0, 31, 16, 10, 0, 10),
+ [BCM_CYGNUS_ASIU_ADC_CLK] = ASIU_DIV_VAL(0x4, 31, 16, 10, 0, 10),
+ [BCM_CYGNUS_ASIU_PWM_CLK] = ASIU_DIV_VAL(0x8, 31, 16, 10, 0, 10),
};
static const struct iproc_asiu_gate asiu_gate[] = {
- [BCM_CYGNUS_ASIU_KEYPAD_CLK] = asiu_gate_val(0x0, 7),
- [BCM_CYGNUS_ASIU_ADC_CLK] = asiu_gate_val(0x0, 9),
- [BCM_CYGNUS_ASIU_PWM_CLK] = asiu_gate_val(IPROC_CLK_INVALID_OFFSET, 0),
+ [BCM_CYGNUS_ASIU_KEYPAD_CLK] = ASIU_GATE_VAL(0x0, 7),
+ [BCM_CYGNUS_ASIU_ADC_CLK] = ASIU_GATE_VAL(0x0, 9),
+ [BCM_CYGNUS_ASIU_PWM_CLK] = ASIU_GATE_VAL(IPROC_CLK_INVALID_OFFSET, 0),
};
static void __init cygnus_asiu_init(struct device_node *node)
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 2dda4e8295a9..afd5891ac9e6 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -74,7 +74,8 @@ struct iproc_clk {
};
struct iproc_pll {
- void __iomem *pll_base;
+ void __iomem *status_base;
+ void __iomem *control_base;
void __iomem *pwr_base;
void __iomem *asiu_base;
@@ -127,7 +128,7 @@ static int pll_wait_for_lock(struct iproc_pll *pll)
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
for (i = 0; i < LOCK_DELAY; i++) {
- u32 val = readl(pll->pll_base + ctrl->status.offset);
+ u32 val = readl(pll->status_base + ctrl->status.offset);
if (val & (1 << ctrl->status.shift))
return 0;
@@ -137,6 +138,18 @@ static int pll_wait_for_lock(struct iproc_pll *pll)
return -EIO;
}
+static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
+ const u32 offset, u32 val)
+{
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+ writel(val, base + offset);
+
+ if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
+ (base == pll->status_base || base == pll->control_base)))
+ val = readl(base + offset);
+}
+
static void __pll_disable(struct iproc_pll *pll)
{
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
@@ -145,17 +158,25 @@ static void __pll_disable(struct iproc_pll *pll)
if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
val = readl(pll->asiu_base + ctrl->asiu.offset);
val &= ~(1 << ctrl->asiu.en_shift);
- writel(val, pll->asiu_base + ctrl->asiu.offset);
+ iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
+ }
+
+ if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
+ val = readl(pll->control_base + ctrl->aon.offset);
+ val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
+ iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
}
- /* latch input value so core power can be shut down */
- val = readl(pll->pwr_base + ctrl->aon.offset);
- val |= (1 << ctrl->aon.iso_shift);
- writel(val, pll->pwr_base + ctrl->aon.offset);
+ if (pll->pwr_base) {
+ /* latch input value so core power can be shut down */
+ val = readl(pll->pwr_base + ctrl->aon.offset);
+ val |= 1 << ctrl->aon.iso_shift;
+ iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
- /* power down the core */
- val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
- writel(val, pll->pwr_base + ctrl->aon.offset);
+ /* power down the core */
+ val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
+ iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
+ }
}
static int __pll_enable(struct iproc_pll *pll)
@@ -163,17 +184,25 @@ static int __pll_enable(struct iproc_pll *pll)
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
u32 val;
- /* power up the PLL and make sure it's not latched */
- val = readl(pll->pwr_base + ctrl->aon.offset);
- val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
- val &= ~(1 << ctrl->aon.iso_shift);
- writel(val, pll->pwr_base + ctrl->aon.offset);
+ if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
+ val = readl(pll->control_base + ctrl->aon.offset);
+ val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
+ iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
+ }
+
+ if (pll->pwr_base) {
+ /* power up the PLL and make sure it's not latched */
+ val = readl(pll->pwr_base + ctrl->aon.offset);
+ val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
+ val &= ~(1 << ctrl->aon.iso_shift);
+ iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
+ }
/* certain PLLs also need to be ungated from the ASIU top level */
if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
val = readl(pll->asiu_base + ctrl->asiu.offset);
val |= (1 << ctrl->asiu.en_shift);
- writel(val, pll->asiu_base + ctrl->asiu.offset);
+ iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
}
return 0;
@@ -185,11 +214,9 @@ static void __pll_put_in_reset(struct iproc_pll *pll)
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
- val = readl(pll->pll_base + reset->offset);
+ val = readl(pll->control_base + reset->offset);
val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift);
- writel(val, pll->pll_base + reset->offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + reset->offset);
+ iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
@@ -198,17 +225,19 @@ static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
u32 val;
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
+ const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;
+
+ val = readl(pll->control_base + dig_filter->offset);
+ val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
+ bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
+ bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
+ val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
+ ka << dig_filter->ka_shift;
+ iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);
- val = readl(pll->pll_base + reset->offset);
- val &= ~(bit_mask(reset->ki_width) << reset->ki_shift |
- bit_mask(reset->kp_width) << reset->kp_shift |
- bit_mask(reset->ka_width) << reset->ka_shift);
- val |= ki << reset->ki_shift | kp << reset->kp_shift |
- ka << reset->ka_shift;
+ val = readl(pll->control_base + reset->offset);
val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift;
- writel(val, pll->pll_base + reset->offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + reset->offset);
+ iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
@@ -263,10 +292,9 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
/* put PLL in reset */
__pll_put_in_reset(pll);
- writel(0, pll->pll_base + ctrl->vco_ctrl.u_offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->vco_ctrl.u_offset);
- val = readl(pll->pll_base + ctrl->vco_ctrl.l_offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);
+
+ val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);
if (rate >= VCO_LOW && rate < VCO_MID)
val |= (1 << PLL_VCO_LOW_SHIFT);
@@ -276,36 +304,29 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
else
val |= (1 << PLL_VCO_HIGH_SHIFT);
- writel(val, pll->pll_base + ctrl->vco_ctrl.l_offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->vco_ctrl.l_offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);
/* program integer part of NDIV */
- val = readl(pll->pll_base + ctrl->ndiv_int.offset);
+ val = readl(pll->control_base + ctrl->ndiv_int.offset);
val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
val |= vco->ndiv_int << ctrl->ndiv_int.shift;
- writel(val, pll->pll_base + ctrl->ndiv_int.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->ndiv_int.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);
/* program fractional part of NDIV */
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
- val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
+ val = readl(pll->control_base + ctrl->ndiv_frac.offset);
val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
ctrl->ndiv_frac.shift);
val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
- writel(val, pll->pll_base + ctrl->ndiv_frac.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->ndiv_frac.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
+ val);
}
/* program PDIV */
- val = readl(pll->pll_base + ctrl->pdiv.offset);
+ val = readl(pll->control_base + ctrl->pdiv.offset);
val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
val |= vco->pdiv << ctrl->pdiv.shift;
- writel(val, pll->pll_base + ctrl->pdiv.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->pdiv.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);
__pll_bring_out_reset(pll, kp, ka, ki);
@@ -345,14 +366,14 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
u32 val;
- u64 ndiv;
- unsigned int ndiv_int, ndiv_frac, pdiv;
+ u64 ndiv, ndiv_int, ndiv_frac;
+ unsigned int pdiv;
if (parent_rate == 0)
return 0;
/* PLL needs to be locked */
- val = readl(pll->pll_base + ctrl->status.offset);
+ val = readl(pll->status_base + ctrl->status.offset);
if ((val & (1 << ctrl->status.shift)) == 0) {
clk->rate = 0;
return 0;
@@ -363,25 +384,22 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
*
* ((ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
*/
- val = readl(pll->pll_base + ctrl->ndiv_int.offset);
+ val = readl(pll->control_base + ctrl->ndiv_int.offset);
ndiv_int = (val >> ctrl->ndiv_int.shift) &
bit_mask(ctrl->ndiv_int.width);
- ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
+ ndiv = ndiv_int << 20;
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
- val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
+ val = readl(pll->control_base + ctrl->ndiv_frac.offset);
ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
bit_mask(ctrl->ndiv_frac.width);
-
- if (ndiv_frac != 0)
- ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
- ndiv_frac;
+ ndiv += ndiv_frac;
}
- val = readl(pll->pll_base + ctrl->pdiv.offset);
+ val = readl(pll->control_base + ctrl->pdiv.offset);
pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
- clk->rate = (ndiv * parent_rate) >> ctrl->ndiv_int.shift;
+ clk->rate = (ndiv * parent_rate) >> 20;
if (pdiv == 0)
clk->rate *= 2;
@@ -443,16 +461,14 @@ static int iproc_clk_enable(struct clk_hw *hw)
u32 val;
/* channel enable is active low */
- val = readl(pll->pll_base + ctrl->enable.offset);
+ val = readl(pll->control_base + ctrl->enable.offset);
val &= ~(1 << ctrl->enable.enable_shift);
- writel(val, pll->pll_base + ctrl->enable.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
/* also make sure channel is not held */
- val = readl(pll->pll_base + ctrl->enable.offset);
+ val = readl(pll->control_base + ctrl->enable.offset);
val &= ~(1 << ctrl->enable.hold_shift);
- writel(val, pll->pll_base + ctrl->enable.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->enable.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
return 0;
}
@@ -467,11 +483,9 @@ static void iproc_clk_disable(struct clk_hw *hw)
if (ctrl->flags & IPROC_CLK_AON)
return;
- val = readl(pll->pll_base + ctrl->enable.offset);
+ val = readl(pll->control_base + ctrl->enable.offset);
val |= 1 << ctrl->enable.enable_shift;
- writel(val, pll->pll_base + ctrl->enable.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->enable.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
}
static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
@@ -486,7 +500,7 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
if (parent_rate == 0)
return 0;
- val = readl(pll->pll_base + ctrl->mdiv.offset);
+ val = readl(pll->control_base + ctrl->mdiv.offset);
mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
if (mdiv == 0)
mdiv = 256;
@@ -533,16 +547,14 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (div > 256)
return -EINVAL;
- val = readl(pll->pll_base + ctrl->mdiv.offset);
+ val = readl(pll->control_base + ctrl->mdiv.offset);
if (div == 256) {
val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
} else {
val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
val |= div << ctrl->mdiv.shift;
}
- writel(val, pll->pll_base + ctrl->mdiv.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->mdiv.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
clk->rate = parent_rate / div;
return 0;
@@ -567,11 +579,10 @@ static void iproc_pll_sw_cfg(struct iproc_pll *pll)
if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
u32 val;
- val = readl(pll->pll_base + ctrl->sw_ctrl.offset);
+ val = readl(pll->control_base + ctrl->sw_ctrl.offset);
val |= BIT(ctrl->sw_ctrl.shift);
- writel(val, pll->pll_base + ctrl->sw_ctrl.offset);
- if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
- readl(pll->pll_base + ctrl->sw_ctrl.offset);
+ iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
+ val);
}
}
@@ -606,13 +617,12 @@ void __init iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(!pll->clks))
goto err_clks;
- pll->pll_base = of_iomap(node, 0);
- if (WARN_ON(!pll->pll_base))
+ pll->control_base = of_iomap(node, 0);
+ if (WARN_ON(!pll->control_base))
goto err_pll_iomap;
+ /* Some SoCs do not require the pwr_base, thus failing is not fatal */
pll->pwr_base = of_iomap(node, 1);
- if (WARN_ON(!pll->pwr_base))
- goto err_pwr_iomap;
/* some PLLs require gating control at the top ASIU level */
if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
@@ -621,6 +631,16 @@ void __init iproc_pll_clk_setup(struct device_node *node,
goto err_asiu_iomap;
}
+ if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
+ /* Some SoCs have a split status/control. If this does not
+ * exist, assume they are unified.
+ */
+ pll->status_base = of_iomap(node, 2);
+ if (!pll->status_base)
+ goto err_status_iomap;
+ } else
+ pll->status_base = pll->control_base;
+
/* initialize and register the PLL itself */
pll->ctrl = pll_ctrl;
@@ -691,14 +711,18 @@ err_clk_register:
clk_unregister(pll->clk_data.clks[i]);
err_pll_register:
+ if (pll->status_base != pll->control_base)
+ iounmap(pll->status_base);
+
+err_status_iomap:
if (pll->asiu_base)
iounmap(pll->asiu_base);
err_asiu_iomap:
- iounmap(pll->pwr_base);
+ if (pll->pwr_base)
+ iounmap(pll->pwr_base);
-err_pwr_iomap:
- iounmap(pll->pll_base);
+ iounmap(pll->control_base);
err_pll_iomap:
kfree(pll->clks);
diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h
index d834b7abd5c6..8988de70a98c 100644
--- a/drivers/clk/bcm/clk-iproc.h
+++ b/drivers/clk/bcm/clk-iproc.h
@@ -49,6 +49,18 @@
#define IPROC_CLK_PLL_NEEDS_SW_CFG BIT(4)
/*
+ * Some PLLs use a different way to control clock power, via the PWRDWN bit in
+ * the PLL control register
+ */
+#define IPROC_CLK_EMBED_PWRCTRL BIT(5)
+
+/*
+ * Some PLLs have separate registers for Status and Control. Identify this to
+ * let the driver know if additional registers need to be used
+ */
+#define IPROC_CLK_PLL_SPLIT_STAT_CTRL BIT(6)
+
+/*
* Parameters for VCO frequency configuration
*
* VCO frequency =
@@ -88,12 +100,19 @@ struct iproc_pll_aon_pwr_ctrl {
};
/*
- * Control of the PLL reset, with Ki, Kp, and Ka parameters
+ * Control of the PLL reset
*/
struct iproc_pll_reset_ctrl {
unsigned int offset;
unsigned int reset_shift;
unsigned int p_reset_shift;
+};
+
+/*
+ * Control of the Ki, Kp, and Ka parameters
+ */
+struct iproc_pll_dig_filter_ctrl {
+ unsigned int offset;
unsigned int ki_shift;
unsigned int ki_width;
unsigned int kp_shift;
@@ -123,6 +142,7 @@ struct iproc_pll_ctrl {
struct iproc_pll_aon_pwr_ctrl aon;
struct iproc_asiu_gate asiu;
struct iproc_pll_reset_ctrl reset;
+ struct iproc_pll_dig_filter_ctrl dig_filter;
struct iproc_pll_sw_ctrl sw_ctrl;
struct iproc_clk_reg_op ndiv_int;
struct iproc_clk_reg_op ndiv_frac;
diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c
new file mode 100644
index 000000000000..a564e9248814
--- /dev/null
+++ b/drivers/clk/bcm/clk-ns2.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/bcm-ns2.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, .ki_shift = kis,\
+ .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+ .ka_width = kaw }
+
+#define VCO_CTRL_VAL(uo, lo) { .u_offset = uo, .l_offset = lo }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+static const struct iproc_pll_ctrl genpll_scr = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 1, 15, 12),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 27, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_scr_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so setting
+ * it to 0.
+ */
+ [BCM_NS2_GENPLL_SCR_SCR_CLK] = {
+ .channel = BCM_NS2_GENPLL_SCR_SCR_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_FS_CLK] = {
+ .channel = BCM_NS2_GENPLL_SCR_FS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x18, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_AUDIO_CLK] = {
+ .channel = BCM_NS2_GENPLL_SCR_AUDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_CH3_UNUSED] = {
+ .channel = BCM_NS2_GENPLL_SCR_CH3_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_CH4_UNUSED] = {
+ .channel = BCM_NS2_GENPLL_SCR_CH4_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x14, 16, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_CH5_UNUSED] = {
+ .channel = BCM_NS2_GENPLL_SCR_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x14, 24, 8),
+ },
+};
+
+static void __init ns2_genpll_scr_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll_scr, NULL, 0, genpll_scr_clk,
+ ARRAY_SIZE(genpll_scr_clk));
+}
+CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr",
+ ns2_genpll_scr_clk_init);
+
+static const struct iproc_pll_ctrl genpll_sw = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 2, 9, 8),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 13, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_sw_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so setting
+ * it to 0.
+ */
+ [BCM_NS2_GENPLL_SW_RPE_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_RPE_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SW_250_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_250_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x18, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SW_NIC_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_NIC_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SW_CHIMP_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_CHIMP_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SW_PORT_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_PORT_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x14, 16, 8),
+ },
+ [BCM_NS2_GENPLL_SW_SDIO_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x14, 24, 8),
+ },
+};
+
+static void __init ns2_genpll_sw_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll_sw, NULL, 0, genpll_sw_clk,
+ ARRAY_SIZE(genpll_sw_clk));
+}
+CLK_OF_DECLARE(ns2_genpll_sw_clk, "brcm,ns2-genpll-sw",
+ ns2_genpll_sw_clk_init);
+
+static const struct iproc_pll_ctrl lcpll_ddr = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 2, 1, 0),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 1, 4),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 0, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll_ddr_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so setting
+ * it to 0.
+ */
+ [BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK] = {
+ .channel = BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_DDR_CLK] = {
+ .channel = BCM_NS2_LCPLL_DDR_DDR_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH2_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH2_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x10, 0, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH3_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH3_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x10, 8, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH4_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH4_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x10, 16, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH5_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x10, 24, 8),
+ },
+};
+
+static void __init ns2_lcpll_ddr_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll_ddr, NULL, 0, lcpll_ddr_clk,
+ ARRAY_SIZE(lcpll_ddr_clk));
+}
+CLK_OF_DECLARE(ns2_lcpll_ddr_clk, "brcm,ns2-lcpll-ddr",
+ ns2_lcpll_ddr_clk_init);
+
+static const struct iproc_pll_ctrl lcpll_ports = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 2, 5, 4),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 1, 4),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 0, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll_ports_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so setting
+ * it to 0.
+ */
+ [BCM_NS2_LCPLL_PORTS_WAN_CLK] = {
+ .channel = BCM_NS2_LCPLL_PORTS_WAN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_RGMII_CLK] = {
+ .channel = BCM_NS2_LCPLL_PORTS_RGMII_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH2_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH2_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x10, 0, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH3_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH3_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x10, 8, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH4_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH4_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x10, 16, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH5_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x10, 24, 8),
+ },
+};
+
+static void __init ns2_lcpll_ports_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll_ports, NULL, 0, lcpll_ports_clk,
+ ARRAY_SIZE(lcpll_ports_clk));
+}
+CLK_OF_DECLARE(ns2_lcpll_ports_clk, "brcm,ns2-lcpll-ports",
+ ns2_lcpll_ports_clk_init);
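As a quick illustration (not part of the commit), the ENABLE_VAL() and REG_VAL() shorthands used throughout these NS2 tables are plain designated-initializer macros; expanding the BCM_NS2_GENPLL_SCR_SCR_CLK entry by hand gives:

	/* .enable = ENABLE_VAL(0x0, 18, 12, 0) expands to: */
	.enable = { .offset = 0x0, .enable_shift = 18, .hold_shift = 12,
		    .bypass_shift = 0 },
	/* .mdiv = REG_VAL(0x18, 0, 8) expands to: */
	.mdiv = { .offset = 0x18, .shift = 0, .width = 8 },

so the channel gate, hold and bypass controls sit in the register at offset 0x0, and the 8-bit post-divider occupies bits [7:0] of the register at offset 0x18.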
diff --git a/drivers/clk/bcm/clk-nsp.c b/drivers/clk/bcm/clk-nsp.c
new file mode 100644
index 000000000000..cf66f640a47d
--- /dev/null
+++ b/drivers/clk/bcm/clk-nsp.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/bcm-nsp.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, .ki_shift = kis,\
+ .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+ .ka_width = kaw }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+static void __init nsp_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(nsp_armpll, "brcm,nsp-armpll", nsp_armpll_init);
+
+static const struct iproc_pll_ctrl genpll = {
+ .flags = IPROC_CLK_PLL_HAS_NDIV_FRAC | IPROC_CLK_EMBED_PWRCTRL,
+ .aon = AON_VAL(0x0, 1, 12, 0),
+ .reset = RESET_VAL(0x0, 11, 10),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .ndiv_int = REG_VAL(0x14, 20, 10),
+ .ndiv_frac = REG_VAL(0x14, 0, 20),
+ .pdiv = REG_VAL(0x18, 24, 3),
+ .status = REG_VAL(0x20, 12, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_clk[] = {
+ [BCM_NSP_GENPLL_PHY_CLK] = {
+ .channel = BCM_NSP_GENPLL_PHY_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 12, 6, 18),
+ .mdiv = REG_VAL(0x18, 16, 8),
+ },
+ [BCM_NSP_GENPLL_ENET_SW_CLK] = {
+ .channel = BCM_NSP_GENPLL_ENET_SW_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 13, 7, 19),
+ .mdiv = REG_VAL(0x18, 8, 8),
+ },
+ [BCM_NSP_GENPLL_USB_PHY_REF_CLK] = {
+ .channel = BCM_NSP_GENPLL_USB_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 14, 8, 20),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_NSP_GENPLL_IPROCFAST_CLK] = {
+ .channel = BCM_NSP_GENPLL_IPROCFAST_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 15, 9, 21),
+ .mdiv = REG_VAL(0x1c, 16, 8),
+ },
+ [BCM_NSP_GENPLL_SATA1_CLK] = {
+ .channel = BCM_NSP_GENPLL_SATA1_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 16, 10, 22),
+ .mdiv = REG_VAL(0x1c, 8, 8),
+ },
+ [BCM_NSP_GENPLL_SATA2_CLK] = {
+ .channel = BCM_NSP_GENPLL_SATA2_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 17, 11, 23),
+ .mdiv = REG_VAL(0x1c, 0, 8),
+ },
+};
+
+static void __init nsp_genpll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk,
+ ARRAY_SIZE(genpll_clk));
+}
+CLK_OF_DECLARE(nsp_genpll_clk, "brcm,nsp-genpll", nsp_genpll_clk_init);
+
+static const struct iproc_pll_ctrl lcpll0 = {
+ .flags = IPROC_CLK_PLL_HAS_NDIV_FRAC | IPROC_CLK_EMBED_PWRCTRL,
+ .aon = AON_VAL(0x0, 1, 24, 0),
+ .reset = RESET_VAL(0x0, 23, 22),
+ .dig_filter = DF_VAL(0x0, 16, 3, 12, 4, 19, 4),
+ .ndiv_int = REG_VAL(0x4, 20, 8),
+ .ndiv_frac = REG_VAL(0x4, 0, 20),
+ .pdiv = REG_VAL(0x4, 28, 3),
+ .status = REG_VAL(0x10, 12, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll0_clk[] = {
+ [BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK] = {
+ .channel = BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 6, 3, 9),
+ .mdiv = REG_VAL(0x8, 24, 8),
+ },
+ [BCM_NSP_LCPLL0_SDIO_CLK] = {
+ .channel = BCM_NSP_LCPLL0_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 4, 10),
+ .mdiv = REG_VAL(0x8, 16, 8),
+ },
+ [BCM_NSP_LCPLL0_DDR_PHY_CLK] = {
+ .channel = BCM_NSP_LCPLL0_DDR_PHY_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 5, 11),
+ .mdiv = REG_VAL(0x8, 8, 8),
+ },
+};
+
+static void __init nsp_lcpll0_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk,
+ ARRAY_SIZE(lcpll0_clk));
+}
+CLK_OF_DECLARE(nsp_lcpll0_clk, "brcm,nsp-lcpll0", nsp_lcpll0_clk_init);
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 73153fc45ee9..23e0e3be6c37 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -490,8 +490,8 @@ static const struct berlin2_gate_data bg2_gates[] __initconst = {
{ "usb0", "perif", 11 },
{ "usb1", "perif", 12 },
{ "pbridge", "perif", 13, CLK_IGNORE_UNUSED },
- { "sdio0", "perif", 14, CLK_IGNORE_UNUSED },
- { "sdio1", "perif", 15, CLK_IGNORE_UNUSED },
+ { "sdio0", "perif", 14 },
+ { "sdio1", "perif", 15 },
{ "nfc", "perif", 17 },
{ "smemc", "perif", 19 },
{ "audiohd", "audiohd_pll", 26 },
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 221f40c2b850..243f421abcb4 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -283,7 +283,7 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
{ "usb2", "perif", 13 },
{ "usb3", "perif", 14 },
{ "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
- { "sdio", "perif", 16, CLK_IGNORE_UNUSED },
+ { "sdio", "perif", 16 },
{ "nfc", "perif", 18 },
{ "pcie", "perif", 22 },
};
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
deleted file mode 100644
index dd295e498309..000000000000
--- a/drivers/clk/clk-bcm2835.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2010 Broadcom
- * Copyright (C) 2012 Stephen Warren
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/clk/bcm2835.h>
-#include <linux/of.h>
-
-/*
- * These are fixed clocks. They're probably not all root clocks and it may
- * be possible to turn them on and off but until this is mapped out better
- * it's the only way they can be used.
- */
-void __init bcm2835_init_clocks(void)
-{
- struct clk *clk;
- int ret;
-
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
- 126000000);
- if (IS_ERR(clk))
- pr_err("apb_pclk not registered\n");
-
- clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, CLK_IS_ROOT,
- 3000000);
- if (IS_ERR(clk))
- pr_err("uart0_pclk not registered\n");
- ret = clk_register_clkdev(clk, NULL, "20201000.uart");
- if (ret)
- pr_err("uart0_pclk alias not registered\n");
-
- clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, CLK_IS_ROOT,
- 125000000);
- if (IS_ERR(clk))
- pr_err("uart1_pclk not registered\n");
- ret = clk_register_clkdev(clk, NULL, "20215000.uart");
- if (ret)
- pr_err("uart1_pclk alias not registered\n");
-}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index f24d0a19ae70..3ace102a2a0a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -24,7 +24,7 @@
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
- * rate - rate is adjustable. clk->rate = DIV_ROUND_UP(parent->rate / divisor)
+ * rate - rate is adjustable. clk->rate = ceiling(parent->rate / divisor)
* parent - fixed parent. No clk_set_parent support
*/
@@ -132,7 +132,7 @@ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
return parent_rate;
}
- return DIV_ROUND_UP(parent_rate, div);
+ return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
EXPORT_SYMBOL_GPL(divider_recalc_rate);
@@ -210,7 +210,7 @@ static int _div_round_up(const struct clk_div_table *table,
unsigned long parent_rate, unsigned long rate,
unsigned long flags)
{
- int div = DIV_ROUND_UP(parent_rate, rate);
+ int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
div = __roundup_pow_of_two(div);
@@ -227,7 +227,7 @@ static int _div_round_closest(const struct clk_div_table *table,
int up, down;
unsigned long up_rate, down_rate;
- up = DIV_ROUND_UP(parent_rate, rate);
+ up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
down = parent_rate / rate;
if (flags & CLK_DIVIDER_POWER_OF_TWO) {
@@ -238,8 +238,8 @@ static int _div_round_closest(const struct clk_div_table *table,
down = _round_down_table(table, down);
}
- up_rate = DIV_ROUND_UP(parent_rate, up);
- down_rate = DIV_ROUND_UP(parent_rate, down);
+ up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
+ down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);
return (rate - up_rate) <= (down_rate - rate) ? up : down;
}
@@ -318,7 +318,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
}
parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
rate * i);
- now = DIV_ROUND_UP(parent_rate, i);
+ now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
bestdiv = i;
best = now;
@@ -342,7 +342,7 @@ long divider_round_rate(struct clk_hw *hw, unsigned long rate,
div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
- return DIV_ROUND_UP(*prate, div);
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
}
EXPORT_SYMBOL_GPL(divider_round_rate);
@@ -358,7 +358,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
- return DIV_ROUND_UP(*prate, bestdiv);
+ return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
}
return divider_round_rate(hw, rate, prate, divider->table,
@@ -371,7 +371,7 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
{
unsigned int div, value;
- div = DIV_ROUND_UP(parent_rate, rate);
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
if (!_is_valid_div(table, div, flags))
return -EINVAL;
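The DIV_ROUND_UP() to DIV_ROUND_UP_ULL() conversions above guard against the intermediate "parent_rate + div - 1" wrapping on 32-bit machines. A minimal userspace sketch of the failure mode being avoided (the sample rate and divisor below are assumptions chosen for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t parent_rate = 4200000000U, div = 100000000U;

	/* 32-bit DIV_ROUND_UP(): the addition wraps modulo 2^32, result is 0 */
	printf("%u\n", (parent_rate + div - 1) / div);
	/* 64-bit DIV_ROUND_UP_ULL(): same computation widened to u64, result is 42 */
	printf("%llu\n",
	       (unsigned long long)(((uint64_t)parent_rate + div - 1) / div));
	return 0;
}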
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index e85f856b8592..5c4955e33f7a 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -7,13 +7,14 @@
*
* Adjustable fractional divider clock implementation.
* Output rate = (m / n) * parent_rate.
+ * Uses rational best approximation algorithm.
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/gcd.h>
+#include <linux/rational.h>
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
@@ -22,7 +23,8 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long flags = 0;
- u32 val, m, n;
+ unsigned long m, n;
+ u32 val;
u64 ret;
if (fd->lock)
@@ -50,23 +52,33 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
}
static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+ unsigned long *parent_rate)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
- unsigned maxn = (fd->nmask >> fd->nshift) + 1;
- unsigned div;
+ unsigned long scale;
+ unsigned long m, n;
+ u64 ret;
- if (!rate || rate >= *prate)
- return *prate;
+ if (!rate || rate >= *parent_rate)
+ return *parent_rate;
- div = gcd(*prate, rate);
+ /*
+ * Get rate closer to *parent_rate to guarantee there is no overflow
+ * for m and n. In the result it will be the nearest rate left shifted
+ * by (scale - fd->nwidth) bits.
+ */
+ scale = fls_long(*parent_rate / rate - 1);
+ if (scale > fd->nwidth)
+ rate <<= scale - fd->nwidth;
- while ((*prate / div) > maxn) {
- div <<= 1;
- rate <<= 1;
- }
+ rational_best_approximation(rate, *parent_rate,
+ GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
+ &m, &n);
- return rate;
+ ret = (u64)*parent_rate * m;
+ do_div(ret, n);
+
+ return ret;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -74,13 +86,12 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long flags = 0;
- unsigned long div;
- unsigned n, m;
+ unsigned long m, n;
u32 val;
- div = gcd(parent_rate, rate);
- m = rate / div;
- n = parent_rate / div;
+ rational_best_approximation(rate, parent_rate,
+ GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
+ &m, &n);
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
@@ -128,9 +139,11 @@ struct clk *clk_register_fractional_divider(struct device *dev,
fd->reg = reg;
fd->mshift = mshift;
- fd->mmask = (BIT(mwidth) - 1) << mshift;
+ fd->mwidth = mwidth;
+ fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
fd->nshift = nshift;
- fd->nmask = (BIT(nwidth) - 1) << nshift;
+ fd->nwidth = nwidth;
+ fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
fd->flags = clk_divider_flags;
fd->lock = lock;
fd->hw.init = &init;
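The fractional divider now delegates the m/n search to rational_best_approximation() from lib/rational.c instead of reducing by gcd(), because the exact reduced fraction may not fit the register fields, whereas the helper keeps both values within the supplied bounds. A small standalone sketch of the difference (the example rates are assumptions, not taken from the patch):

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* 22.5792 MHz from a 24.576 MHz parent reduces exactly to 147/160,
	 * which happens to fit 8-bit m/n fields */
	unsigned long parent = 24576000, rate = 22579200;
	unsigned long g = gcd(parent, rate);

	printf("m = %lu, n = %lu\n", rate / g, parent / g);	/* m = 147, n = 160 */
	return 0;
}

For ratios whose reduced form does not fit (say 25 MHz from a 33.333333 MHz parent, where gcd() leaves m and n in the millions), rational_best_approximation(rate, parent_rate, GENMASK(mwidth - 1, 0), GENMASK(nwidth - 1, 0), &m, &n) instead returns a nearby fraction whose numerator and denominator both stay within the field widths, which is exactly how clk_fd_set_rate() calls it above.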
diff --git a/drivers/clk/clk-max77802.c b/drivers/clk/clk-max77802.c
index 74c49b93a6eb..4a89f7979ba0 100644
--- a/drivers/clk/clk-max77802.c
+++ b/drivers/clk/clk-max77802.c
@@ -94,5 +94,5 @@ static struct platform_driver max77802_clk_driver = {
module_platform_driver(max77802_clk_driver);
MODULE_DESCRIPTION("MAXIM 77802 Clock Driver");
-MODULE_AUTHOR("Javier Martinez Canillas <javier.martinez@collabora.co.uk>");
+MODULE_AUTHOR("Javier Martinez Canillas <javier@osg.samsung.com");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
new file mode 100644
index 000000000000..fe7806506bf3
--- /dev/null
+++ b/drivers/clk/clk-multiplier.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
+
+static unsigned long __get_mult(struct clk_multiplier *mult,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ if (mult->flags & CLK_MULTIPLIER_ROUND_CLOSEST)
+ return DIV_ROUND_CLOSEST(rate, parent_rate);
+
+ return rate / parent_rate;
+}
+
+static unsigned long clk_multiplier_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_multiplier *mult = to_clk_multiplier(hw);
+ unsigned long val;
+
+ val = clk_readl(mult->reg) >> mult->shift;
+ val &= GENMASK(mult->width - 1, 0);
+
+ if (!val && mult->flags & CLK_MULTIPLIER_ZERO_BYPASS)
+ val = 1;
+
+ return parent_rate * val;
+}
+
+static bool __is_best_rate(unsigned long rate, unsigned long new,
+ unsigned long best, unsigned long flags)
+{
+ if (flags & CLK_MULTIPLIER_ROUND_CLOSEST)
+ return abs(rate - new) < abs(rate - best);
+
+ return new >= rate && new < best;
+}
+
+static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ u8 width, unsigned long flags)
+{
+ unsigned long orig_parent_rate = *best_parent_rate;
+ unsigned long parent_rate, current_rate, best_rate = ~0;
+ unsigned int i, bestmult = 0;
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
+ return rate / *best_parent_rate;
+
+ for (i = 1; i < ((1 << width) - 1); i++) {
+ if (rate == orig_parent_rate * i) {
+ /*
+ * This is the best case for us if we have a
+ * perfect match without changing the parent
+ * rate.
+ */
+ *best_parent_rate = orig_parent_rate;
+ return i;
+ }
+
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ rate / i);
+ current_rate = parent_rate * i;
+
+ if (__is_best_rate(rate, current_rate, best_rate, flags)) {
+ bestmult = i;
+ best_rate = current_rate;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ return bestmult;
+}
+
+static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_multiplier *mult = to_clk_multiplier(hw);
+ unsigned long factor = __bestmult(hw, rate, parent_rate,
+ mult->width, mult->flags);
+
+ return *parent_rate * factor;
+}
+
+static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_multiplier *mult = to_clk_multiplier(hw);
+ unsigned long factor = __get_mult(mult, rate, parent_rate);
+ unsigned long flags = 0;
+ unsigned long val;
+
+ if (mult->lock)
+ spin_lock_irqsave(mult->lock, flags);
+ else
+ __acquire(mult->lock);
+
+ val = clk_readl(mult->reg);
+ val &= ~GENMASK(mult->width + mult->shift - 1, mult->shift);
+ val |= factor << mult->shift;
+ clk_writel(val, mult->reg);
+
+ if (mult->lock)
+ spin_unlock_irqrestore(mult->lock, flags);
+ else
+ __release(mult->lock);
+
+ return 0;
+}
+
+const struct clk_ops clk_multiplier_ops = {
+ .recalc_rate = clk_multiplier_recalc_rate,
+ .round_rate = clk_multiplier_round_rate,
+ .set_rate = clk_multiplier_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_multiplier_ops);
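For the new multiplier basic type above, the only subtlety is the rounding mode in __get_mult(): by default the factor is truncated, while CLK_MULTIPLIER_ROUND_CLOSEST rounds to the nearest integer. A tiny sketch (the sample rates are assumptions, and DIV_ROUND_CLOSEST is simplified to the unsigned case):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))	/* unsigned-only simplification */

int main(void)
{
	unsigned long parent = 24000000, rate = 110000000;

	/* default: truncate -> x4, i.e. 96 MHz */
	printf("x%lu -> %lu Hz\n", rate / parent, rate / parent * parent);
	/* CLK_MULTIPLIER_ROUND_CLOSEST -> x5, i.e. 120 MHz */
	printf("x%lu -> %lu Hz\n", DIV_ROUND_CLOSEST(rate, parent),
	       DIV_ROUND_CLOSEST(rate, parent) * parent);
	return 0;
}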
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
new file mode 100644
index 000000000000..6af7dce54241
--- /dev/null
+++ b/drivers/clk/clk-si514.c
@@ -0,0 +1,379 @@
+/*
+ * Driver for Silicon Labs Si514 Programmable Oscillator
+ *
+ * Copyright (C) 2015 Topic Embedded Products
+ *
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* I2C registers */
+#define SI514_REG_LP 0
+#define SI514_REG_M_FRAC1 5
+#define SI514_REG_M_FRAC2 6
+#define SI514_REG_M_FRAC3 7
+#define SI514_REG_M_INT_FRAC 8
+#define SI514_REG_M_INT 9
+#define SI514_REG_HS_DIV 10
+#define SI514_REG_LS_HS_DIV 11
+#define SI514_REG_OE_STATE 14
+#define SI514_REG_RESET 128
+#define SI514_REG_CONTROL 132
+
+/* Register values */
+#define SI514_RESET_RST BIT(7)
+
+#define SI514_CONTROL_FCAL BIT(0)
+#define SI514_CONTROL_OE BIT(2)
+
+#define SI514_MIN_FREQ 100000U
+#define SI514_MAX_FREQ 250000000U
+
+#define FXO 31980000U
+
+#define FVCO_MIN 2080000000U
+#define FVCO_MAX 2500000000U
+
+#define HS_DIV_MAX 1022
+
+struct clk_si514 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ struct i2c_client *i2c_client;
+};
+#define to_clk_si514(_hw) container_of(_hw, struct clk_si514, hw)
+
+/* Multiplier/divider settings */
+struct clk_si514_muldiv {
+ u32 m_frac; /* 29-bit Fractional part of multiplier M */
+ u8 m_int; /* Integer part of multiplier M, 65..78 */
+ u8 ls_div_bits; /* 2nd divider, as 2^x */
+ u16 hs_div; /* 1st divider, must be even and 10<=x<=1022 */
+};
+
+/* Enables or disables the output driver */
+static int si514_enable_output(struct clk_si514 *data, bool enable)
+{
+ return regmap_update_bits(data->regmap, SI514_REG_CONTROL,
+ SI514_CONTROL_OE, enable ? SI514_CONTROL_OE : 0);
+}
+
+/* Retrieve clock multiplier and dividers from hardware */
+static int si514_get_muldiv(struct clk_si514 *data,
+ struct clk_si514_muldiv *settings)
+{
+ int err;
+ u8 reg[7];
+
+ err = regmap_bulk_read(data->regmap, SI514_REG_M_FRAC1,
+ reg, ARRAY_SIZE(reg));
+ if (err)
+ return err;
+
+ settings->m_frac = reg[0] | reg[1] << 8 | reg[2] << 16 |
+ (reg[3] & 0x1F) << 24;
+ settings->m_int = (reg[4] & 0x3f) << 3 | reg[3] >> 5;
+ settings->ls_div_bits = (reg[6] >> 4) & 0x07;
+ settings->hs_div = (reg[6] & 0x03) << 8 | reg[5];
+ return 0;
+}
+
+static int si514_set_muldiv(struct clk_si514 *data,
+ struct clk_si514_muldiv *settings)
+{
+ u8 lp;
+ u8 reg[7];
+ int err;
+
+ /* Calculate LP1/LP2 according to table 13 in the datasheet */
+ /* 65.259980246 */
+ if (settings->m_int < 65 ||
+ (settings->m_int == 65 && settings->m_frac <= 139575831))
+ lp = 0x22;
+ /* 67.859763463 */
+ else if (settings->m_int < 67 ||
+ (settings->m_int == 67 && settings->m_frac <= 461581994))
+ lp = 0x23;
+ /* 72.937624981 */
+ else if (settings->m_int < 72 ||
+ (settings->m_int == 72 && settings->m_frac <= 503383578))
+ lp = 0x33;
+ /* 75.843265046 */
+ else if (settings->m_int < 75 ||
+ (settings->m_int == 75 && settings->m_frac <= 452724474))
+ lp = 0x34;
+ else
+ lp = 0x44;
+
+ err = regmap_write(data->regmap, SI514_REG_LP, lp);
+ if (err < 0)
+ return err;
+
+ reg[0] = settings->m_frac;
+ reg[1] = settings->m_frac >> 8;
+ reg[2] = settings->m_frac >> 16;
+ reg[3] = settings->m_frac >> 24 | settings->m_int << 5;
+ reg[4] = settings->m_int >> 3;
+ reg[5] = settings->hs_div;
+ reg[6] = (settings->hs_div >> 8) | (settings->ls_div_bits << 4);
+
+ err = regmap_bulk_write(data->regmap, SI514_REG_HS_DIV, reg + 5, 2);
+ if (err < 0)
+ return err;
+ /*
+ * Writing to SI514_REG_M_INT_FRAC triggers the clock change, so that
+ * must be written last
+ */
+ return regmap_bulk_write(data->regmap, SI514_REG_M_FRAC1, reg, 5);
+}
+
+/* Calculate divider settings for a given frequency */
+static int si514_calc_muldiv(struct clk_si514_muldiv *settings,
+ unsigned long frequency)
+{
+ u64 m;
+ u32 ls_freq;
+ u32 tmp;
+ u8 res;
+
+ if ((frequency < SI514_MIN_FREQ) || (frequency > SI514_MAX_FREQ))
+ return -EINVAL;
+
+ /* Determine the minimum value of LS_DIV and resulting target freq. */
+ ls_freq = frequency;
+ if (frequency >= (FVCO_MIN / HS_DIV_MAX))
+ settings->ls_div_bits = 0;
+ else {
+ res = 1;
+ tmp = 2 * HS_DIV_MAX;
+ while (tmp <= (HS_DIV_MAX * 32)) {
+ if ((frequency * tmp) >= FVCO_MIN)
+ break;
+ ++res;
+ tmp <<= 1;
+ }
+ settings->ls_div_bits = res;
+ ls_freq = frequency << res;
+ }
+
+ /* Determine minimum HS_DIV, round up to even number */
+ settings->hs_div = DIV_ROUND_UP(FVCO_MIN >> 1, ls_freq) << 1;
+
+ /* M = LS_DIV x HS_DIV x frequency / F_XO (in fixed-point) */
+ m = ((u64)(ls_freq * settings->hs_div) << 29) + (FXO / 2);
+ do_div(m, FXO);
+ settings->m_frac = (u32)m & (BIT(29) - 1);
+ settings->m_int = (u32)(m >> 29);
+
+ return 0;
+}
+
+/* Calculate resulting frequency given the register settings */
+static unsigned long si514_calc_rate(struct clk_si514_muldiv *settings)
+{
+ u64 m = settings->m_frac | ((u64)settings->m_int << 29);
+ u32 d = settings->hs_div * BIT(settings->ls_div_bits);
+
+ return ((u32)(((m * FXO) + (FXO / 2)) >> 29)) / d;
+}
+
+static unsigned long si514_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+ struct clk_si514_muldiv settings;
+ int err;
+
+ err = si514_get_muldiv(data, &settings);
+ if (err) {
+ dev_err(&data->i2c_client->dev, "unable to retrieve settings\n");
+ return 0;
+ }
+
+ return si514_calc_rate(&settings);
+}
+
+static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_si514_muldiv settings;
+ int err;
+
+ if (!rate)
+ return 0;
+
+ err = si514_calc_muldiv(&settings, rate);
+ if (err)
+ return err;
+
+ return si514_calc_rate(&settings);
+}
+
+/*
+ * Update output frequency for big frequency changes (> 1000 ppm).
+ * The chip supports <1000ppm changes "on the fly", but that is not
+ * implemented here.
+ */
+static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+ struct clk_si514_muldiv settings;
+ int err;
+
+ err = si514_calc_muldiv(&settings, rate);
+ if (err)
+ return err;
+
+ si514_enable_output(data, false);
+
+ err = si514_set_muldiv(data, &settings);
+ if (err < 0)
+ return err; /* Undefined state now, best to leave disabled */
+
+ /* Trigger calibration */
+ err = regmap_write(data->regmap, SI514_REG_CONTROL, SI514_CONTROL_FCAL);
+ if (err < 0)
+ return err;
+
+ /* Applying a new frequency can take up to 10ms */
+ usleep_range(10000, 12000);
+
+ si514_enable_output(data, true);
+
+ return err;
+}
+
+static const struct clk_ops si514_clk_ops = {
+ .recalc_rate = si514_recalc_rate,
+ .round_rate = si514_round_rate,
+ .set_rate = si514_set_rate,
+};
+
+static bool si514_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI514_REG_CONTROL:
+ case SI514_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool si514_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI514_REG_LP:
+ case SI514_REG_M_FRAC1 ... SI514_REG_LS_HS_DIV:
+ case SI514_REG_OE_STATE:
+ case SI514_REG_RESET:
+ case SI514_REG_CONTROL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config si514_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = SI514_REG_CONTROL,
+ .writeable_reg = si514_regmap_is_writeable,
+ .volatile_reg = si514_regmap_is_volatile,
+};
+
+static int si514_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_si514 *data;
+ struct clk_init_data init;
+ struct clk *clk;
+ int err;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &si514_clk_ops;
+ init.flags = CLK_IS_ROOT;
+ init.num_parents = 0;
+ data->hw.init = &init;
+ data->i2c_client = client;
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &init.name))
+ init.name = client->dev.of_node->name;
+
+ data->regmap = devm_regmap_init_i2c(client, &si514_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ i2c_set_clientdata(client, data);
+
+ clk = devm_clk_register(&client->dev, &data->hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return PTR_ERR(clk);
+ }
+ err = of_clk_add_provider(client->dev.of_node, of_clk_src_simple_get,
+ clk);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int si514_remove(struct i2c_client *client)
+{
+ of_clk_del_provider(client->dev.of_node);
+ return 0;
+}
+
+static const struct i2c_device_id si514_id[] = {
+ { "si514", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si514_id);
+
+static const struct of_device_id clk_si514_of_match[] = {
+ { .compatible = "silabs,si514" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_si514_of_match);
+
+static struct i2c_driver si514_driver = {
+ .driver = {
+ .name = "si514",
+ .of_match_table = clk_si514_of_match,
+ },
+ .probe = si514_probe,
+ .remove = si514_remove,
+ .id_table = si514_id,
+};
+module_i2c_driver(si514_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("Si514 driver");
+MODULE_LICENSE("GPL");
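To make the new si514 driver's divider math concrete, here is the si514_calc_muldiv() arithmetic worked for a hypothetical 100 MHz request (illustrative sketch only; the target frequency is an assumption and the printed values are approximate):

#include <stdio.h>

#define FXO		31980000ULL	/* fixed crystal frequency used by the driver */
#define FVCO_MIN	2080000000ULL

int main(void)
{
	/* 100 MHz >= FVCO_MIN / HS_DIV_MAX, so LS_DIV = 1 (ls_div_bits = 0) */
	unsigned long long fout = 100000000;
	/* smallest even HS_DIV that keeps the VCO at or above FVCO_MIN: 22 -> 2.2 GHz */
	unsigned long long hs_div = ((FVCO_MIN / 2 + fout - 1) / fout) * 2;
	/* M = VCO / FXO in 29-bit fixed point, about 68.793 */
	unsigned long long m = (((fout * hs_div) << 29) + FXO / 2) / FXO;

	printf("hs_div = %llu\n", hs_div);			/* 22 */
	printf("m_int  = %llu\n", m >> 29);			/* 68 */
	printf("m_frac = %llu\n", m & ((1ULL << 29) - 1));	/* ~0.793 * 2^29 */
	return 0;
}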
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 5596c0aac22f..e346b223199d 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1183,13 +1183,13 @@ static int si5351_dt_parse(struct i2c_client *client,
if (of_property_read_u32(child, "reg", &num)) {
dev_err(&client->dev, "missing reg property of %s\n",
child->name);
- return -EINVAL;
+ goto put_child;
}
if (num >= 8 ||
(variant == SI5351_VARIANT_A3 && num >= 3)) {
dev_err(&client->dev, "invalid clkout %d\n", num);
- return -EINVAL;
+ goto put_child;
}
if (!of_property_read_u32(child, "silabs,multisynth-source",
@@ -1207,7 +1207,7 @@ static int si5351_dt_parse(struct i2c_client *client,
dev_err(&client->dev,
"invalid parent %d for multisynth %d\n",
val, num);
- return -EINVAL;
+ goto put_child;
}
}
@@ -1230,7 +1230,7 @@ static int si5351_dt_parse(struct i2c_client *client,
dev_err(&client->dev,
"invalid parent %d for clkout %d\n",
val, num);
- return -EINVAL;
+ goto put_child;
}
pdata->clkout[num].clkout_src =
SI5351_CLKOUT_SRC_CLKIN;
@@ -1239,7 +1239,7 @@ static int si5351_dt_parse(struct i2c_client *client,
dev_err(&client->dev,
"invalid parent %d for clkout %d\n",
val, num);
- return -EINVAL;
+ goto put_child;
}
}
@@ -1256,7 +1256,7 @@ static int si5351_dt_parse(struct i2c_client *client,
dev_err(&client->dev,
"invalid drive strength %d for clkout %d\n",
val, num);
- return -EINVAL;
+ goto put_child;
}
}
@@ -1283,7 +1283,7 @@ static int si5351_dt_parse(struct i2c_client *client,
dev_err(&client->dev,
"invalid disable state %d for clkout %d\n",
val, num);
- return -EINVAL;
+ goto put_child;
}
}
@@ -1296,6 +1296,9 @@ static int si5351_dt_parse(struct i2c_client *client,
client->dev.platform_data = pdata;
return 0;
+put_child:
+ of_node_put(child);
+ return -EINVAL;
}
#else
static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant)
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 96a6190acac2..27c0da29eca3 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -27,7 +27,6 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
-#include <asm/setup.h>
/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src) (((src) & 0x000001ff))
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0ebcf449778a..f13c3f4228d4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -272,7 +272,7 @@ late_initcall_sync(clk_disable_unused);
/*** helper functions ***/
-const char *__clk_get_name(struct clk *clk)
+const char *__clk_get_name(const struct clk *clk)
{
return !clk ? NULL : clk->core->name;
}
@@ -427,6 +427,11 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
return clk_core_is_prepared(hw->core);
}
+bool clk_hw_is_enabled(const struct clk_hw *hw)
+{
+ return clk_core_is_enabled(hw->core);
+}
+
bool __clk_is_enabled(struct clk *clk)
{
if (!clk)
@@ -1685,7 +1690,7 @@ static struct clk_core *__clk_init_parent(struct clk_core *core)
"%s: multi-parent clocks must implement .get_parent\n",
__func__);
goto out;
- };
+ }
/*
* Do our best to cache parent clocks in core->parents. This prevents
@@ -2932,7 +2937,7 @@ struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
unsigned int idx = clkspec->args[0];
if (idx >= clk_data->clk_num) {
- pr_err("%s: invalid clock index %d\n", __func__, idx);
+ pr_err("%s: invalid clock index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
@@ -3055,6 +3060,7 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
u32 pv;
int rc;
int count;
+ struct clk *clk;
if (index < 0)
return NULL;
@@ -3080,8 +3086,25 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
- &clk_name) < 0)
- clk_name = clkspec.np->name;
+ &clk_name) < 0) {
+ /*
+ * Best effort to get the name if the clock has been
+ * registered with the framework. If the clock isn't
+ * registered, we return the node name as the name of
+ * the clock as long as #clock-cells = 0.
+ */
+ clk = of_clk_get_from_provider(&clkspec);
+ if (IS_ERR(clk)) {
+ if (clkspec.args_count == 0)
+ clk_name = clkspec.np->name;
+ else
+ clk_name = NULL;
+ } else {
+ clk_name = __clk_get_name(clk);
+ clk_put(clk);
+ }
+ }
+
of_node_put(clkspec.np);
return clk_name;
@@ -3179,13 +3202,15 @@ void __init of_clk_init(const struct of_device_id *matches)
list_for_each_entry_safe(clk_provider, next,
&clk_provider_list, node) {
list_del(&clk_provider->node);
+ of_node_put(clk_provider->np);
kfree(clk_provider);
}
+ of_node_put(np);
return;
}
parent->clk_init_cb = match->data;
- parent->np = np;
+ parent->np = of_node_get(np);
list_add_tail(&parent->node, &clk_provider_list);
}
@@ -3199,6 +3224,7 @@ void __init of_clk_init(const struct of_device_id *matches)
of_clk_set_defaults(clk_provider->np, true);
list_del(&clk_provider->node);
+ of_node_put(clk_provider->np);
kfree(clk_provider);
is_init_done = true;
}
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index 2c4add11c1ca..8afb40ef40ce 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -230,7 +230,7 @@ static int hi6220_stub_clk_probe(struct platform_device *pdev)
if (IS_ERR(stub_clk->mbox)) {
dev_err(dev, "failed get mailbox channel\n");
return PTR_ERR(stub_clk->mbox);
- };
+ }
init.name = "acpu0";
init.ops = &hi6220_stub_clk_ops;
diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
index ec1a4c1dacf1..c4c141cab444 100644
--- a/drivers/clk/imx/clk-imx25.c
+++ b/drivers/clk/imx/clk-imx25.c
@@ -86,6 +86,16 @@ enum mx25_clks {
static struct clk *clk[clk_max];
+static struct clk ** const uart_clks[] __initconst = {
+ &clk[uart_ipg_per],
+ &clk[uart1_ipg],
+ &clk[uart2_ipg],
+ &clk[uart3_ipg],
+ &clk[uart4_ipg],
+ &clk[uart5_ipg],
+ NULL
+};
+
static int __init __mx25_clocks_init(unsigned long osc_rate,
void __iomem *ccm_base)
{
@@ -233,6 +243,8 @@ static int __init __mx25_clocks_init(unsigned long osc_rate,
*/
clk_set_parent(clk[cko_sel], clk[ipg]);
+ imx_register_uart_clocks(uart_clks);
+
return 0;
}
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index d9d50d54ef2a..cf5cf75a4848 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -47,6 +47,17 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
static struct clk *clk[IMX27_CLK_MAX];
static struct clk_onecell_data clk_data;
+static struct clk ** const uart_clks[] __initconst = {
+ &clk[IMX27_CLK_PER1_GATE],
+ &clk[IMX27_CLK_UART1_IPG_GATE],
+ &clk[IMX27_CLK_UART2_IPG_GATE],
+ &clk[IMX27_CLK_UART3_IPG_GATE],
+ &clk[IMX27_CLK_UART4_IPG_GATE],
+ &clk[IMX27_CLK_UART5_IPG_GATE],
+ &clk[IMX27_CLK_UART6_IPG_GATE],
+ NULL
+};
+
static void __init _mx27_clocks_init(unsigned long fref)
{
BUG_ON(!ccm);
@@ -163,6 +174,8 @@ static void __init _mx27_clocks_init(unsigned long fref)
clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
+ imx_register_uart_clocks(uart_clks);
+
imx_print_silicon_rev("i.MX27", mx27_revision());
}
@@ -248,8 +261,10 @@ static void __init mx27_clocks_init_dt(struct device_node *np)
if (!of_device_is_compatible(refnp, "fsl,imx-osc26m"))
continue;
- if (!of_property_read_u32(refnp, "clock-frequency", &fref))
+ if (!of_property_read_u32(refnp, "clock-frequency", &fref)) {
+ of_node_put(refnp);
break;
+ }
}
ccm = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 1f8383475bb3..6a964144a5b5 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -62,7 +62,17 @@ enum mx31_clks {
static struct clk *clk[clk_max];
static struct clk_onecell_data clk_data;
-int __init mx31_clocks_init(unsigned long fref)
+static struct clk ** const uart_clks[] __initconst = {
+ &clk[ipg],
+ &clk[uart1_gate],
+ &clk[uart2_gate],
+ &clk[uart3_gate],
+ &clk[uart4_gate],
+ &clk[uart5_gate],
+ NULL
+};
+
+static void __init _mx31_clocks_init(unsigned long fref)
{
void __iomem *base;
struct device_node *np;
@@ -132,6 +142,12 @@ int __init mx31_clocks_init(unsigned long fref)
imx_check_clocks(clk, ARRAY_SIZE(clk));
+ clk_set_parent(clk[csi], clk[upll]);
+ clk_prepare_enable(clk[emi_gate]);
+ clk_prepare_enable(clk[iim_gate]);
+ mx31_revision();
+ clk_disable_unprepare(clk[iim_gate]);
+
np = of_find_compatible_node(NULL, NULL, "fsl,imx31-ccm");
if (np) {
@@ -139,6 +155,13 @@ int __init mx31_clocks_init(unsigned long fref)
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
+}
+
+int __init mx31_clocks_init(void)
+{
+ u32 fref = 26000000; /* default */
+
+ _mx31_clocks_init(fref);
clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
@@ -194,12 +217,8 @@ int __init mx31_clocks_init(unsigned long fref)
clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
clk_register_clkdev(clk[iim_gate], "iim", NULL);
- clk_set_parent(clk[csi], clk[upll]);
- clk_prepare_enable(clk[emi_gate]);
- clk_prepare_enable(clk[iim_gate]);
- mx31_revision();
- clk_disable_unprepare(clk[iim_gate]);
+ imx_register_uart_clocks(uart_clks);
mxc_timer_init(MX31_GPT1_BASE_ADDR, MX31_INT_GPT, GPT_TYPE_IMX31);
return 0;
@@ -214,9 +233,13 @@ int __init mx31_clocks_init_dt(void)
if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
continue;
- if (!of_property_read_u32(np, "clock-frequency", &fref))
+ if (!of_property_read_u32(np, "clock-frequency", &fref)) {
+ of_node_put(np);
break;
+ }
}
- return mx31_clocks_init(fref);
+ _mx31_clocks_init(fref);
+
+ return 0;
}
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index 8623cd4e49fd..a71d24cb4c06 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -84,7 +84,15 @@ enum mx35_clks {
static struct clk *clk[clk_max];
-int __init mx35_clocks_init(void)
+static struct clk ** const uart_clks[] __initconst = {
+ &clk[ipg],
+ &clk[uart1_gate],
+ &clk[uart2_gate],
+ &clk[uart3_gate],
+ NULL
+};
+
+static void __init _mx35_clocks_init(void)
{
void __iomem *base;
u32 pdr0, consumer_sel, hsp_sel;
@@ -220,6 +228,32 @@ int __init mx35_clocks_init(void)
imx_check_clocks(clk, ARRAY_SIZE(clk));
+ clk_prepare_enable(clk[spba_gate]);
+ clk_prepare_enable(clk[gpio1_gate]);
+ clk_prepare_enable(clk[gpio2_gate]);
+ clk_prepare_enable(clk[gpio3_gate]);
+ clk_prepare_enable(clk[iim_gate]);
+ clk_prepare_enable(clk[emi_gate]);
+ clk_prepare_enable(clk[max_gate]);
+ clk_prepare_enable(clk[iomuxc_gate]);
+
+ /*
+ * SCC is needed to boot via mmc after a watchdog reset. The clock code
+ * before conversion to common clk also enabled UART1 (which isn't
+ * handled here and not needed for mmc) and IIM (which is enabled
+ * unconditionally above).
+ */
+ clk_prepare_enable(clk[scc_gate]);
+
+ imx_register_uart_clocks(uart_clks);
+
+ imx_print_silicon_rev("i.MX35", mx35_revision());
+}
+
+int __init mx35_clocks_init(void)
+{
+ _mx35_clocks_init();
+
clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -279,25 +313,6 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
clk_register_clkdev(clk[admux_gate], "audmux", NULL);
- clk_prepare_enable(clk[spba_gate]);
- clk_prepare_enable(clk[gpio1_gate]);
- clk_prepare_enable(clk[gpio2_gate]);
- clk_prepare_enable(clk[gpio3_gate]);
- clk_prepare_enable(clk[iim_gate]);
- clk_prepare_enable(clk[emi_gate]);
- clk_prepare_enable(clk[max_gate]);
- clk_prepare_enable(clk[iomuxc_gate]);
-
- /*
- * SCC is needed to boot via mmc after a watchdog reset. The clock code
- * before conversion to common clk also enabled UART1 (which isn't
- * handled here and not needed for mmc) and IIM (which is enabled
- * unconditionally above).
- */
- clk_prepare_enable(clk[scc_gate]);
-
- imx_print_silicon_rev("i.MX35", mx35_revision());
-
mxc_timer_init(MX35_GPT1_BASE_ADDR, MX35_INT_GPT, GPT_TYPE_IMX31);
return 0;
@@ -305,10 +320,10 @@ int __init mx35_clocks_init(void)
static void __init mx35_clocks_init_dt(struct device_node *ccm_node)
{
+ _mx35_clocks_init();
+
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(ccm_node, of_clk_src_onecell_get, &clk_data);
-
- mx35_clocks_init();
}
CLK_OF_DECLARE(imx35, "fsl,imx35-ccm", mx35_clocks_init_dt);
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index a7e4f394be0d..c6770348d2ab 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -130,6 +130,20 @@ static const char *cpu_podf_sels[] = { "pll1_sw", "step_sel" };
static struct clk *clk[IMX5_CLK_END];
static struct clk_onecell_data clk_data;
+static struct clk ** const uart_clks[] __initconst = {
+ &clk[IMX5_CLK_UART1_IPG_GATE],
+ &clk[IMX5_CLK_UART1_PER_GATE],
+ &clk[IMX5_CLK_UART2_IPG_GATE],
+ &clk[IMX5_CLK_UART2_PER_GATE],
+ &clk[IMX5_CLK_UART3_IPG_GATE],
+ &clk[IMX5_CLK_UART3_PER_GATE],
+ &clk[IMX5_CLK_UART4_IPG_GATE],
+ &clk[IMX5_CLK_UART4_PER_GATE],
+ &clk[IMX5_CLK_UART5_IPG_GATE],
+ &clk[IMX5_CLK_UART5_PER_GATE],
+ NULL
+};
+
static void __init mx5_clocks_common_init(void __iomem *ccm_base)
{
clk[IMX5_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -310,6 +324,8 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
+
+ imx_register_uart_clocks(uart_clks);
}
static void __init mx50_clocks_init(struct device_node *np)
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index b2c1c047dc94..c1935081d34a 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -119,6 +119,7 @@ static unsigned int share_count_ssi1;
static unsigned int share_count_ssi2;
static unsigned int share_count_ssi3;
static unsigned int share_count_mipi_core_cfg;
+static unsigned int share_count_spdif;
static inline int clk_on_imx6q(void)
{
@@ -130,6 +131,12 @@ static inline int clk_on_imx6dl(void)
return of_machine_is_compatible("fsl,imx6dl");
}
+static struct clk ** const uart_clks[] __initconst = {
+ &clk[IMX6QDL_CLK_UART_IPG],
+ &clk[IMX6QDL_CLK_UART_SERIAL],
+ NULL
+};
+
static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -456,7 +463,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
- clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
+ clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_spdif);
+ clk[IMX6QDL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif);
clk[IMX6QDL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
clk[IMX6QDL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
clk[IMX6QDL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
@@ -541,5 +549,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
/* All existing boards with PCIe use LVDS1 */
if (IS_ENABLED(CONFIG_PCI_IMX6))
clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
+
+ imx_register_uart_clocks(uart_clks);
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index a0d4cf26cfa9..1be6230a07af 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -97,6 +97,7 @@ static struct clk_div_table video_div_table[] = {
static unsigned int share_count_ssi1;
static unsigned int share_count_ssi2;
static unsigned int share_count_ssi3;
+static unsigned int share_count_spdif;
static struct clk *clks[IMX6SL_CLK_END];
static struct clk_onecell_data clk_data;
@@ -184,6 +185,12 @@ void imx6sl_set_wait_clk(bool enter)
imx6sl_enable_pll_arm(false);
}
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX6SL_CLK_UART],
+ &clks[IMX6SL_CLK_UART_SERIAL],
+ NULL
+};
+
static void __init imx6sl_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -391,7 +398,8 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22);
clks[IMX6SL_CLK_SDMA] = imx_clk_gate2("sdma", "ipg", base + 0x7c, 6);
clks[IMX6SL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
- clks[IMX6SL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif0_podf", base + 0x7c, 14);
+ clks[IMX6SL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif0_podf", base + 0x7c, 14, &share_count_spdif);
+ clks[IMX6SL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif);
clks[IMX6SL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
clks[IMX6SL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
clks[IMX6SL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
@@ -439,5 +447,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6SL_CLK_LCDIF_AXI_SEL],
clks[IMX6SL_CLK_PLL2_PFD2]);
+
+ imx_register_uart_clocks(uart_clks);
}
CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 5b95c2c2bf52..fea125eb4330 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -135,6 +135,12 @@ static u32 share_count_ssi1;
static u32 share_count_ssi2;
static u32 share_count_ssi3;
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX6SX_CLK_UART_IPG],
+ &clks[IMX6SX_CLK_UART_SERIAL],
+ NULL
+};
+
static void __init imx6sx_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -454,6 +460,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clks[IMX6SX_CLK_AUDIO] = imx_clk_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio);
clks[IMX6SX_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SX_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio);
clks[IMX6SX_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
clks[IMX6SX_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
clks[IMX6SX_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
@@ -557,5 +564,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
+ imx_register_uart_clocks(uart_clks);
}
CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index aaa36650695f..01718d05e952 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -407,6 +407,24 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ /*
+ * Lower the AHB clock rate before changing the parent clock source,
+ * as AHB clock rate can NOT be higher than 133MHz, but its parent
+ * will be switched from 396MHz PFD to 528MHz PLL in order to increase
+ * AXI clock rate, so we need to lower AHB rate first to make sure at
+ * any time, AHB rate is <= 133MHz.
+ */
+ clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
+
+ /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
+
+ /* Make sure AHB rate is 132MHz */
+ clk_set_rate(clks[IMX6UL_CLK_AHB], 132000000);
+
	/* set perclk to source from OSC */
clk_set_parent(clks[IMX6UL_CLK_PERCLK_SEL], clks[IMX6UL_CLK_OSC]);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 71f3a94b472c..448ef321948b 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -363,6 +363,17 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
static struct clk_onecell_data clk_data;
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX7D_UART1_ROOT_CLK],
+ &clks[IMX7D_UART2_ROOT_CLK],
+ &clks[IMX7D_UART3_ROOT_CLK],
+ &clks[IMX7D_UART4_ROOT_CLK],
+ &clks[IMX7D_UART5_ROOT_CLK],
+ &clks[IMX7D_UART6_ROOT_CLK],
+ &clks[IMX7D_UART7_ROOT_CLK],
+ NULL
+};
+
static void __init imx7d_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -818,6 +829,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate2("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate2("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate2("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
+ clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate2("adc_root_clk", "ipg_root_clk", base + 0x4200, 0);
clks[IMX7D_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
@@ -856,5 +868,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
	/* set uart module clock's parent clock source, which must be greater than 80MHz */
clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
+ imx_register_uart_clocks(uart_clks);
+
}
CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index 20889d59b44d..b18f875eac6a 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -77,7 +77,7 @@ struct clk_pllv2 {
static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
{
- long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
+ long mfi, mfn, mfd, pdf, ref_clk;
unsigned long dbl;
s64 temp;
@@ -87,19 +87,15 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
mfi = (mfi <= 5) ? 5 : mfi;
mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
- mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
- /* Sign extend to 32-bits */
- if (mfn >= 0x04000000) {
- mfn |= 0xFC000000;
- mfn_abs = -mfn;
- }
+ mfn = dp_mfn & MXC_PLL_DP_MFN_MASK;
+ mfn = sign_extend32(mfn, 26);
ref_clk = 2 * parent_rate;
if (dbl != 0)
ref_clk *= 2;
ref_clk /= (pdf + 1);
- temp = (u64) ref_clk * mfn_abs;
+ temp = (u64) ref_clk * abs(mfn);
do_div(temp, mfd + 1);
if (mfn < 0)
temp = -temp;
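
For illustration, a minimal user-space sketch of the arithmetic this hunk switches to: sign_extend32(v, 26) treats bit 26 as the sign bit of the 27-bit MFN field, which is what the removed open-coded branch did by hand. The mask and sample values below are assumptions, not taken from the driver.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* User-space equivalent of the kernel's sign_extend32(value, index),
 * where 'index' names the sign bit (bit 26 for the 27-bit MFN field). */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;
	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* Sample register value with bit 26 set, i.e. a negative MFN.
	 * The 0x07ffffff mask is an assumption standing in for
	 * MXC_PLL_DP_MFN_MASK. */
	uint32_t dp_mfn = 0x07f00000;
	int32_t mfn = sign_extend32(dp_mfn & 0x07ffffff, 26);
	unsigned long long ref_clk = 48000000, mfd = 999;

	/* Same shape as __clk_pllv2_recalc_rate(): scale by |mfn|,
	 * divide by MFD + 1, then restore the sign. */
	unsigned long long temp = ref_clk * (unsigned long long)llabs(mfn);
	temp /= (mfd + 1);

	printf("mfn = %d, fractional contribution = %s%llu Hz\n",
	       mfn, mfn < 0 ? "-" : "", temp);
	return 0;
}
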
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index bff45ead7389..d1b1c95177bb 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -387,6 +387,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_SNVS] = imx_clk_gate2("snvs-rtc", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(7));
clk[VF610_CLK_DAP] = imx_clk_gate("dap", "platform_bus", CCM_CCSR, 24);
+ clk[VF610_CLK_OCOTP] = imx_clk_gate("ocotp", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(5));
imx_check_clocks(clk, ARRAY_SIZE(clk));
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index df12b5307175..a634b1185be3 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -73,3 +73,41 @@ void imx_cscmr1_fixup(u32 *val)
*val ^= CSCMR1_FIXUP;
return;
}
+
+static int imx_keep_uart_clocks __initdata;
+static struct clk ** const *imx_uart_clocks __initdata;
+
+static int __init imx_keep_uart_clocks_param(char *str)
+{
+ imx_keep_uart_clocks = 1;
+
+ return 0;
+}
+__setup_param("earlycon", imx_keep_uart_earlycon,
+ imx_keep_uart_clocks_param, 0);
+__setup_param("earlyprintk", imx_keep_uart_earlyprintk,
+ imx_keep_uart_clocks_param, 0);
+
+void __init imx_register_uart_clocks(struct clk ** const clks[])
+{
+ if (imx_keep_uart_clocks) {
+ int i;
+
+ imx_uart_clocks = clks;
+ for (i = 0; imx_uart_clocks[i]; i++)
+ clk_prepare_enable(*imx_uart_clocks[i]);
+ }
+}
+
+static int __init imx_clk_disable_uart(void)
+{
+ if (imx_keep_uart_clocks && imx_uart_clocks) {
+ int i;
+
+ for (i = 0; imx_uart_clocks[i]; i++)
+ clk_disable_unprepare(*imx_uart_clocks[i]);
+ }
+
+ return 0;
+}
+late_initcall_sync(imx_clk_disable_uart);
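
A minimal sketch of how the SoC clock inits in this series use the new helper: the UART clocks stay prepared and enabled across early boot and are released at late_initcall time. The EXAMPLE_* identifiers below are placeholders, not a real SoC's clock IDs.

static struct clk *clks[EXAMPLE_CLK_END];

static struct clk ** const uart_clks[] __initconst = {
	&clks[EXAMPLE_CLK_UART_IPG],
	&clks[EXAMPLE_CLK_UART_SERIAL],
	NULL
};

static void __init example_clocks_init(struct device_node *ccm_node)
{
	/* ... register the SoC clock tree into clks[] ... */

	/*
	 * Keep the UART clocks prepared and enabled until late_initcall
	 * time whenever "earlycon" or "earlyprintk" is on the command line.
	 */
	imx_register_uart_clocks(uart_clks);
}
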
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 1049b0c7d818..c94ac5c26226 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -7,6 +7,7 @@
extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
+void imx_register_uart_clocks(struct clk ** const clks[]);
extern void imx_cscmr1_fixup(u32 *val);
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 3f553d0ae0b5..a26ba2184454 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -157,7 +157,7 @@ out:
* _of_clk_init - PLL initialisation via DT
* @node: device tree node for this clock
* @pllctrl: If true, lower 6 bits of multiplier is in pllm register of
- * pll controller, else it is in the control regsiter0(bit 11-6)
+ * pll controller, else it is in the control register0(bit 11-6)
*/
static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
{
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 8e4b2a4635b9..95fdfacb2ebf 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,4 +1,4 @@
-obj-y += clk-mtk.o clk-pll.o clk-gate.o
+obj-y += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-y += clk-mt8135.o
obj-y += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c
new file mode 100644
index 000000000000..5303c5980867
--- /dev/null
+++ b/drivers/clk/mediatek/clk-apmixed.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+
+#define REF2USB_TX_EN BIT(0)
+#define REF2USB_TX_LPF_EN BIT(1)
+#define REF2USB_TX_OUT_EN BIT(2)
+#define REF2USB_EN_MASK (REF2USB_TX_EN | REF2USB_TX_LPF_EN | \
+ REF2USB_TX_OUT_EN)
+
+struct mtk_ref2usb_tx {
+ struct clk_hw hw;
+ void __iomem *base_addr;
+};
+
+static inline struct mtk_ref2usb_tx *to_mtk_ref2usb_tx(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_ref2usb_tx, hw);
+}
+
+static int mtk_ref2usb_tx_is_prepared(struct clk_hw *hw)
+{
+ struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw);
+
+ return (readl(tx->base_addr) & REF2USB_EN_MASK) == REF2USB_EN_MASK;
+}
+
+static int mtk_ref2usb_tx_prepare(struct clk_hw *hw)
+{
+ struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw);
+ u32 val;
+
+ val = readl(tx->base_addr);
+
+ val |= REF2USB_TX_EN;
+ writel(val, tx->base_addr);
+ udelay(100);
+
+ val |= REF2USB_TX_LPF_EN;
+ writel(val, tx->base_addr);
+
+ val |= REF2USB_TX_OUT_EN;
+ writel(val, tx->base_addr);
+
+ return 0;
+}
+
+static void mtk_ref2usb_tx_unprepare(struct clk_hw *hw)
+{
+ struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw);
+ u32 val;
+
+ val = readl(tx->base_addr);
+ val &= ~REF2USB_EN_MASK;
+ writel(val, tx->base_addr);
+}
+
+static const struct clk_ops mtk_ref2usb_tx_ops = {
+ .is_prepared = mtk_ref2usb_tx_is_prepared,
+ .prepare = mtk_ref2usb_tx_prepare,
+ .unprepare = mtk_ref2usb_tx_unprepare,
+};
+
+struct clk * __init mtk_clk_register_ref2usb_tx(const char *name,
+ const char *parent_name, void __iomem *reg)
+{
+ struct mtk_ref2usb_tx *tx;
+ struct clk_init_data init = {};
+ struct clk *clk;
+
+ tx = kzalloc(sizeof(*tx), GFP_KERNEL);
+ if (!tx)
+ return ERR_PTR(-ENOMEM);
+
+ tx->base_addr = reg;
+ tx->hw.init = &init;
+
+ init.name = name;
+ init.ops = &mtk_ref2usb_tx_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &tx->hw);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n", name, PTR_ERR(clk));
+ kfree(tx);
+ }
+
+ return clk;
+}
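
For orientation, a condensed sketch of how a platform init wires this helper up; the full caller added by this patch is in clk-mt8173.c further down, and the clk_data pointer here is a placeholder.

	void __iomem *base = of_iomap(node, 0);
	struct clk *clk;

	if (!base)
		return;

	/* 0x8 is the REF2USB_TX control register offset in mt8173's apmixedsys. */
	clk = mtk_clk_register_ref2usb_tx("ref2usb_tx", "clk26m", base + 0x8);
	if (!IS_ERR(clk))
		clk_data->clks[CLK_APMIXED_REF2USB_TX] = clk;
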
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 57020368a693..576bdb7c98b8 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -97,7 +97,7 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
.disable = mtk_cg_disable_inv,
};
-struct clk *mtk_clk_register_gate(
+struct clk * __init mtk_clk_register_gate(
const char *name,
const char *parent_name,
struct regmap *regmap,
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 90eff85f4285..227e356403d9 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -15,21 +15,28 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mfd/syscon.h>
#include "clk-mtk.h"
#include "clk-gate.h"
#include <dt-bindings/clock/mt8173-clk.h>
+/*
+ * For some clocks we don't care what their actual rates are, and those
+ * rates may differ between products or scenarios. Such clocks are
+ * modelled with a rate of 0 to denote that it is not an actual rate.
+ */
+#define DUMMY_RATE 0
+
static DEFINE_SPINLOCK(mt8173_clk_lock);
-static const struct mtk_fixed_factor root_clk_alias[] __initconst = {
- FACTOR(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk_null", 1, 1),
- FACTOR(CLK_TOP_DPI, "dpi_ck", "clk_null", 1, 1),
- FACTOR(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk_null", 1, 1),
- FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "clk_null", 1, 1),
+static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk26m", 125 * MHZ),
+ FIXED_CLK(CLK_TOP_DSI0_DIG, "dsi0_dig", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_DSI1_DIG, "dsi1_dig", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_LVDS_PXL, "lvds_pxl", "lvdspll", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_LVDS_CTS, "lvds_cts", "lvdspll", DUMMY_RATE),
};
static const struct mtk_fixed_factor top_divs[] __initconst = {
@@ -54,6 +61,7 @@ static const struct mtk_fixed_factor top_divs[] __initconst = {
FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "tvdpll_445p5m", 1, 3),
FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
@@ -590,7 +598,7 @@ static const struct mtk_composite top_muxes[] __initconst = {
MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1),
};
-static const struct mtk_gate_regs infra_cg_regs = {
+static const struct mtk_gate_regs infra_cg_regs __initconst = {
.set_ofs = 0x0040,
.clr_ofs = 0x0044,
.sta_ofs = 0x0048,
@@ -612,20 +620,24 @@ static const struct mtk_gate infra_clks[] __initconst = {
GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7),
GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
- GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "clk_null", 15),
+ GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "cpum_ck", 15),
GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
GATE_ICG(CLK_INFRA_CEC, "infra_cec", "clk26m", 18),
GATE_ICG(CLK_INFRA_PMICSPI, "infra_pmicspi", "pmicspi_sel", 22),
GATE_ICG(CLK_INFRA_PMICWRAP, "infra_pmicwrap", "axi_sel", 23),
};
-static const struct mtk_gate_regs peri0_cg_regs = {
+static const struct mtk_fixed_factor infra_divs[] __initconst = {
+ FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
+};
+
+static const struct mtk_gate_regs peri0_cg_regs __initconst = {
.set_ofs = 0x0008,
.clr_ofs = 0x0010,
.sta_ofs = 0x0018,
};
-static const struct mtk_gate_regs peri1_cg_regs = {
+static const struct mtk_gate_regs peri1_cg_regs __initconst = {
.set_ofs = 0x000c,
.clr_ofs = 0x0014,
.sta_ofs = 0x001c,
@@ -701,6 +713,183 @@ static const struct mtk_composite peri_clks[] __initconst = {
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static const struct mtk_gate_regs cg_regs_4_8_0 __initconst = {
+ .set_ofs = 0x0004,
+ .clr_ofs = 0x0008,
+ .sta_ofs = 0x0000,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &cg_regs_4_8_0, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] __initconst = {
+ GATE_IMG(CLK_IMG_LARB2_SMI, "img_larb2_smi", "mm_sel", 0),
+ GATE_IMG(CLK_IMG_CAM_SMI, "img_cam_smi", "mm_sel", 5),
+ GATE_IMG(CLK_IMG_CAM_CAM, "img_cam_cam", "mm_sel", 6),
+ GATE_IMG(CLK_IMG_SEN_TG, "img_sen_tg", "camtg_sel", 7),
+ GATE_IMG(CLK_IMG_SEN_CAM, "img_sen_cam", "mm_sel", 8),
+ GATE_IMG(CLK_IMG_CAM_SV, "img_cam_sv", "mm_sel", 9),
+ GATE_IMG(CLK_IMG_FD, "img_fd", "mm_sel", 11),
+};
+
+static const struct mtk_gate_regs mm0_cg_regs __initconst = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs __initconst = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] __initconst = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+ GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+ GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
+ GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
+ GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
+ GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
+ GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
+ GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+ GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
+ GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
+};
+
+static const struct mtk_gate_regs vdec0_cg_regs __initconst = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x0000,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs __initconst = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x0008,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate vdec_clks[] __initconst = {
+ GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_LARB_CKEN, "vdec_larb_cken", "mm_sel", 0),
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &cg_regs_4_8_0, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate venc_clks[] __initconst = {
+ GATE_VENC(CLK_VENC_CKE0, "venc_cke0", "mm_sel", 0),
+ GATE_VENC(CLK_VENC_CKE1, "venc_cke1", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_CKE2, "venc_cke2", "venc_sel", 8),
+ GATE_VENC(CLK_VENC_CKE3, "venc_cke3", "venc_sel", 12),
+};
+
+#define GATE_VENCLT(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &cg_regs_4_8_0, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate venclt_clks[] __initconst = {
+ GATE_VENCLT(CLK_VENCLT_CKE0, "venclt_cke0", "mm_sel", 0),
+ GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
+};
+
static struct clk_onecell_data *mt8173_top_clk_data __initdata;
static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
@@ -731,7 +920,7 @@ static void __init mtk_topckgen_init(struct device_node *node)
mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
- mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8173_clk_lock, clk_data);
@@ -754,6 +943,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
+ mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
@@ -792,6 +982,24 @@ static void __init mtk_pericfg_init(struct device_node *node)
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
+struct mtk_clk_usb {
+ int id;
+ const char *name;
+ const char *parent;
+ u32 reg_ofs;
+};
+
+#define APMIXED_USB(_id, _name, _parent, _reg_ofs) { \
+ .id = _id, \
+ .name = _name, \
+ .parent = _parent, \
+ .reg_ofs = _reg_ofs, \
+ }
+
+static const struct mtk_clk_usb apmixed_usb[] __initconst = {
+ APMIXED_USB(CLK_APMIXED_REF2USB_TX, "ref2usb_tx", "clk26m", 0x8),
+};
+
#define MT8173_PLL_FMAX (3000UL * MHZ)
#define CON0_MT8173_RST_BAR BIT(24)
@@ -852,6 +1060,15 @@ static const struct mtk_pll_data plls[] = {
static void __init mtk_apmixedsys_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ struct clk *clk;
+ int r, i;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
@@ -859,7 +1076,113 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ for (i = 0; i < ARRAY_SIZE(apmixed_usb); i++) {
+ const struct mtk_clk_usb *cku = &apmixed_usb[i];
+
+ clk = mtk_clk_register_ref2usb_tx(cku->name, cku->parent,
+ base + cku->reg_ofs);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n", cku->name,
+ PTR_ERR(clk));
+ continue;
+ }
+
+ clk_data->clks[cku->id] = clk;
+ }
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
mtk_apmixedsys_init);
+
+static void __init mtk_imgsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init);
+
+static void __init mtk_mmsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_mmsys, "mediatek,mt8173-mmsys", mtk_mmsys_init);
+
+static void __init mtk_vdecsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8173-vdecsys", mtk_vdecsys_init);
+
+static void __init mtk_vencsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_vencsys, "mediatek,mt8173-vencsys", mtk_vencsys_init);
+
+static void __init mtk_vencltsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENCLT_NR_CLK);
+
+ mtk_clk_register_gates(node, venclt_clks, ARRAY_SIZE(venclt_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_vencltsys, "mediatek,mt8173-vencltsys", mtk_vencltsys_init);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 18444aea63c9..cf08db6c130c 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -24,7 +24,7 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+struct clk_onecell_data * __init mtk_alloc_clk_data(unsigned int clk_num)
{
int i;
struct clk_onecell_data *clk_data;
@@ -49,8 +49,31 @@ err_out:
return NULL;
}
-void mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
- struct clk_onecell_data *clk_data)
+void __init mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
+ int num, struct clk_onecell_data *clk_data)
+{
+ int i;
+ struct clk *clk;
+
+ for (i = 0; i < num; i++) {
+ const struct mtk_fixed_clk *rc = &clks[i];
+
+ clk = clk_register_fixed_rate(NULL, rc->name, rc->parent,
+ rc->parent ? 0 : CLK_IS_ROOT, rc->rate);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ rc->name, PTR_ERR(clk));
+ continue;
+ }
+
+ if (clk_data)
+ clk_data->clks[rc->id] = clk;
+ }
+}
+
+void __init mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
+ int num, struct clk_onecell_data *clk_data)
{
int i;
struct clk *clk;
@@ -72,7 +95,8 @@ void mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
}
}
-int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks,
+int __init mtk_clk_register_gates(struct device_node *node,
+ const struct mtk_gate *clks,
int num, struct clk_onecell_data *clk_data)
{
int i;
@@ -111,7 +135,7 @@ int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks
return 0;
}
-struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
+struct clk * __init mtk_clk_register_composite(const struct mtk_composite *mc,
void __iomem *base, spinlock_t *lock)
{
struct clk *clk;
@@ -196,7 +220,7 @@ err_out:
return ERR_PTR(ret);
}
-void mtk_clk_register_composites(const struct mtk_composite *mcs,
+void __init mtk_clk_register_composites(const struct mtk_composite *mcs,
int num, void __iomem *base, spinlock_t *lock,
struct clk_onecell_data *clk_data)
{
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index c5cbecb3d218..32d2e455eb3f 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -26,6 +26,23 @@ struct clk;
#define MHZ (1000 * 1000)
+struct mtk_fixed_clk {
+ int id;
+ const char *name;
+ const char *parent;
+ unsigned long rate;
+};
+
+#define FIXED_CLK(_id, _name, _parent, _rate) { \
+ .id = _id, \
+ .name = _name, \
+ .parent = _parent, \
+ .rate = _rate, \
+ }
+
+void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
+ int num, struct clk_onecell_data *clk_data);
+
struct mtk_fixed_factor {
int id;
const char *name;
@@ -42,7 +59,7 @@ struct mtk_fixed_factor {
.div = _div, \
}
-extern void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
+void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
int num, struct clk_onecell_data *clk_data);
struct mtk_composite {
@@ -159,10 +176,13 @@ struct mtk_pll_data {
const struct mtk_pll_div_table *div_table;
};
-void __init mtk_clk_register_plls(struct device_node *node,
+void mtk_clk_register_plls(struct device_node *node,
const struct mtk_pll_data *plls, int num_plls,
struct clk_onecell_data *clk_data);
+struct clk *mtk_clk_register_ref2usb_tx(const char *name,
+ const char *parent_name, void __iomem *reg);
+
#ifdef CONFIG_RESET_CONTROLLER
void mtk_register_reset_controller(struct device_node *np,
unsigned int num_regs, int regofs);
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 622e7b6c62b4..966cab1348da 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -317,7 +317,7 @@ void __init mtk_clk_register_plls(struct device_node *node,
const struct mtk_pll_data *plls, int num_plls, struct clk_onecell_data *clk_data)
{
void __iomem *base;
- int r, i;
+ int i;
struct clk *clk;
base = of_iomap(node, 0);
@@ -339,9 +339,4 @@ void __init mtk_clk_register_plls(struct device_node *node,
clk_data->clks[pll->id] = clk;
}
-
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
}
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 85da8b983256..5837eb8a212f 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -197,7 +197,6 @@ static void __init of_cpu_clk_setup(struct device_node *node)
for_each_node_by_type(dn, "cpu") {
struct clk_init_data init;
struct clk *clk;
- struct clk *parent_clk;
char *clk_name = kzalloc(5, GFP_KERNEL);
int cpu, err;
@@ -209,9 +208,8 @@ static void __init of_cpu_clk_setup(struct device_node *node)
goto bail_out;
sprintf(clk_name, "cpu%d", cpu);
- parent_clk = of_clk_get(node, 0);
- cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
+ cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
cpuclk[cpu].clk_name = clk_name;
cpuclk[cpu].cpu = cpu;
cpuclk[cpu].reg_base = clock_complex_base;
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 4a22429cd7a2..28aac67e7b92 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -165,7 +165,7 @@ void __init mvebu_coreclk_setup(struct device_node *np,
clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
cpuclk_name, 0, mult, div);
WARN_ON(IS_ERR(clk_data.clks[2+n]));
- };
+ }
/* Register optional refclk */
if (desc->get_refclk_freq) {
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index 73f0240569ac..f8dd10f6df3d 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -41,11 +41,13 @@ static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
{
struct clk_frac *frac = to_clk_frac(hw);
u32 div;
+ u64 tmp_rate;
div = readl_relaxed(frac->reg) >> frac->shift;
div &= (1 << frac->width) - 1;
- return (parent_rate >> frac->width) * div;
+ tmp_rate = (u64)parent_rate * div;
+ return tmp_rate >> frac->width;
}
static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,7 +56,7 @@ static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
struct clk_frac *frac = to_clk_frac(hw);
unsigned long parent_rate = *prate;
u32 div;
- u64 tmp;
+ u64 tmp, tmp_rate, result;
if (rate > parent_rate)
return -EINVAL;
@@ -67,7 +69,11 @@ static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
if (!div)
return -EINVAL;
- return (parent_rate >> frac->width) * div;
+ tmp_rate = (u64)parent_rate * div;
+ result = tmp_rate >> frac->width;
+ if ((result << frac->width) < tmp_rate)
+ result += 1;
+ return result;
}
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
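
A small user-space sketch of why this hunk widens to 64 bits before shifting: pre-shifting the parent rate throws away its low bits before the multiply, while multiplying first and shifting last keeps full precision, and round_rate() now rounds the truncated result up. The rates and divider value are illustrative only.

#include <stdio.h>

int main(void)
{
	/* Illustrative values: a 297 MHz parent and a 9-bit fractional divider. */
	unsigned long long parent_rate = 297000000ULL;
	unsigned int width = 9, div = 19;

	/* Old computation: pre-shifting the parent throws away low bits. */
	unsigned long long old_rate = (parent_rate >> width) * div;

	/* New computation: widen first, shift last, so no precision is lost. */
	unsigned long long tmp = parent_rate * div;
	unsigned long long new_rate = tmp >> width;

	/* round_rate() additionally rounds up when the shift truncated. */
	if ((new_rate << width) < tmp)
		new_rate += 1;

	printf("old=%llu new=%llu\n", old_rate, new_rate);
	return 0;
}
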
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index eeaee97da110..13aabbb3acbe 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -179,9 +179,22 @@ static void lpc18xx_ccu_gate_disable(struct clk_hw *hw)
static int lpc18xx_ccu_gate_is_enabled(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
+ const struct clk_hw *parent;
+
+ /*
+ * The branch clock registers are only accessible
+ * if the base (parent) clock is enabled. Register
+ * access with a disabled base clock will hang the
+ * system.
+ */
+ parent = clk_hw_get_parent(hw);
+ if (!parent)
+ return 0;
+
+ if (!clk_hw_is_enabled(parent))
+ return 0;
- return clk_readl(gate->reg) & LPC18XX_CCU_RUN;
+ return clk_gate_ops.is_enabled(hw);
}
static const struct clk_ops lpc18xx_ccu_gate_ops = {
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index e0a3cb8970ab..c924572fc9bc 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -480,6 +480,42 @@ static const struct clk_ops lpc18xx_pll1_ops = {
.recalc_rate = lpc18xx_pll1_recalc_rate,
};
+static int lpc18xx_cgu_gate_enable(struct clk_hw *hw)
+{
+ return clk_gate_ops.enable(hw);
+}
+
+static void lpc18xx_cgu_gate_disable(struct clk_hw *hw)
+{
+ clk_gate_ops.disable(hw);
+}
+
+static int lpc18xx_cgu_gate_is_enabled(struct clk_hw *hw)
+{
+ const struct clk_hw *parent;
+
+ /*
+	 * The consumer of base clocks needs to know if the
+ * base clock is really enabled before it can be
+ * accessed. It is therefore necessary to verify
+ * this all the way up.
+ */
+ parent = clk_hw_get_parent(hw);
+ if (!parent)
+ return 0;
+
+ if (!clk_hw_is_enabled(parent))
+ return 0;
+
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops lpc18xx_gate_ops = {
+ .enable = lpc18xx_cgu_gate_enable,
+ .disable = lpc18xx_cgu_gate_disable,
+ .is_enabled = lpc18xx_cgu_gate_is_enabled,
+};
+
static struct lpc18xx_cgu_pll_clk lpc18xx_cgu_src_clk_plls[] = {
LPC1XX_CGU_CLK_PLL(PLL0USB, pll0_src_ids, pll0_ops),
LPC1XX_CGU_CLK_PLL(PLL0AUDIO, pll0_src_ids, pll0_ops),
@@ -510,7 +546,7 @@ static struct clk *lpc18xx_cgu_register_div(struct lpc18xx_cgu_src_clk_div *clk,
return clk_register_composite(NULL, name, parents, clk->n_parents,
&clk->mux.hw, &clk_mux_ops,
&clk->div.hw, &clk_divider_ops,
- &clk->gate.hw, &clk_gate_ops, 0);
+ &clk->gate.hw, &lpc18xx_gate_ops, 0);
}
@@ -538,7 +574,7 @@ static struct clk *lpc18xx_register_base_clk(struct lpc18xx_cgu_base_clk *clk,
return clk_register_composite(NULL, name, parents, clk->n_parents,
&clk->mux.hw, &clk_mux_ops,
NULL, NULL,
- &clk->gate.hw, &clk_gate_ops, 0);
+ &clk->gate.hw, &lpc18xx_gate_ops, 0);
}
@@ -557,7 +593,7 @@ static struct clk *lpc18xx_cgu_register_pll(struct lpc18xx_cgu_pll_clk *clk,
return clk_register_composite(NULL, name, parents, clk->n_parents,
&clk->mux.hw, &clk_mux_ops,
&clk->pll.hw, clk->pll_ops,
- &clk->gate.hw, &clk_gate_ops, 0);
+ &clk->gate.hw, &lpc18xx_gate_ops, 0);
}
static void __init lpc18xx_cgu_register_source_clks(struct device_node *np,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 59d16668bdf5..ee4c83aab4f4 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,3 +1,7 @@
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
@@ -7,6 +11,7 @@ config COMMON_CLK_QCOM
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on apq8084 devices.
@@ -16,6 +21,7 @@ config APQ_GCC_8084
config APQ_MMCC_8084
tristate "APQ8084 Multimedia Clock Controller"
select APQ_GCC_8084
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on apq8084 devices.
@@ -49,6 +55,7 @@ config MSM_GCC_8660
config MSM_GCC_8916
tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8916 devices.
@@ -83,6 +90,7 @@ config MSM_MMCC_8960
config MSM_GCC_8974
tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8974 devices.
@@ -92,6 +100,7 @@ config MSM_GCC_8974
config MSM_MMCC_8974
tristate "MSM8974 Multimedia Clock Controller"
select MSM_GCC_8974
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8974 devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 50b337a24a87..fe6252349e55 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -9,6 +9,7 @@ clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index bccedc4b5756..bfbb28f450c2 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -542,6 +542,200 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
}
+static int clk_rcg_bypass2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *p;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = clk_hw_round_rate(p, req->rate);
+ req->rate = req->best_parent_rate;
+
+ return 0;
+}
+
+static int clk_rcg_bypass2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ struct freq_tbl f = { 0 };
+ u32 ns, src;
+ int i, ret, num_parents = clk_hw_get_num_parents(hw);
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ if (ret)
+ return ret;
+
+ src = ns_to_src(&rcg->s, ns);
+ f.pre_div = ns_to_pre_div(&rcg->p, ns) + 1;
+
+ for (i = 0; i < num_parents; i++) {
+ if (src == rcg->s.parent_map[i].cfg) {
+ f.src = rcg->s.parent_map[i].src;
+ return __clk_rcg_set_rate(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_rcg_bypass2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_rcg_bypass2_set_rate(hw, rate, parent_rate);
+}
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static const struct frac_entry pixel_table[] = {
+ { 1, 2 },
+ { 1, 3 },
+ { 3, 16 },
+ { }
+};
+
+static int clk_rcg_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int delta = 100000;
+ const struct frac_entry *frac = pixel_table;
+ unsigned long request, src_rate;
+
+ for (; frac->num; frac++) {
+ request = (req->rate * frac->den) / frac->num;
+
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
+
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_rcg_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ int delta = 100000;
+ const struct frac_entry *frac = pixel_table;
+ unsigned long request;
+ struct freq_tbl f = { 0 };
+ u32 ns, src;
+ int i, ret, num_parents = clk_hw_get_num_parents(hw);
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ if (ret)
+ return ret;
+
+ src = ns_to_src(&rcg->s, ns);
+ f.pre_div = ns_to_pre_div(&rcg->p, ns) + 1;
+
+ for (i = 0; i < num_parents; i++) {
+ if (src == rcg->s.parent_map[i].cfg) {
+ f.src = rcg->s.parent_map[i].src;
+ break;
+ }
+ }
+
+ /* let us find appropriate m/n values for this */
+ for (; frac->num; frac++) {
+ request = (rate * frac->den) / frac->num;
+
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
+ continue;
+
+ f.m = frac->num;
+ f.n = frac->den;
+
+ return __clk_rcg_set_rate(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_rcg_pixel_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg_pixel_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_rcg_esc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ int pre_div_max = BIT(rcg->p.pre_div_width);
+ int div;
+ unsigned long src_rate;
+
+ if (req->rate == 0)
+ return -EINVAL;
+
+ src_rate = clk_hw_get_rate(req->best_parent_hw);
+
+ div = src_rate / req->rate;
+
+ if (div >= 1 && div <= pre_div_max) {
+ req->best_parent_rate = src_rate;
+ req->rate = src_rate / div;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int clk_rcg_esc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ struct freq_tbl f = { 0 };
+ int pre_div_max = BIT(rcg->p.pre_div_width);
+ int div;
+ u32 ns;
+ int i, ret, num_parents = clk_hw_get_num_parents(hw);
+
+ if (rate == 0)
+ return -EINVAL;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ if (ret)
+ return ret;
+
+ ns = ns_to_src(&rcg->s, ns);
+
+ for (i = 0; i < num_parents; i++) {
+ if (ns == rcg->s.parent_map[i].cfg) {
+ f.src = rcg->s.parent_map[i].src;
+ break;
+ }
+ }
+
+ div = parent_rate / rate;
+
+ if (div >= 1 && div <= pre_div_max) {
+ f.pre_div = div;
+ return __clk_rcg_set_rate(rcg, &f);
+ }
+
+ return -EINVAL;
+}
+
+static int clk_rcg_esc_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg_esc_set_rate(hw, rate, parent_rate);
+}
+
/*
* This type of clock has a glitch-free mux that switches between the output of
* the M/N counter and an always on clock source (XO). When clk_set_rate() is
@@ -639,6 +833,42 @@ const struct clk_ops clk_rcg_bypass_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
+const struct clk_ops clk_rcg_bypass2_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_bypass2_determine_rate,
+ .set_rate = clk_rcg_bypass2_set_rate,
+ .set_rate_and_parent = clk_rcg_bypass2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_bypass2_ops);
+
+const struct clk_ops clk_rcg_pixel_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_pixel_determine_rate,
+ .set_rate = clk_rcg_pixel_set_rate,
+ .set_rate_and_parent = clk_rcg_pixel_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_pixel_ops);
+
+const struct clk_ops clk_rcg_esc_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_esc_determine_rate,
+ .set_rate = clk_rcg_esc_set_rate,
+ .set_rate_and_parent = clk_rcg_esc_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_esc_ops);
+
const struct clk_ops clk_rcg_lcc_ops = {
.enable = clk_rcg_lcc_enable,
.disable = clk_rcg_lcc_disable,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 56028bb31d87..4b1e94bdf29e 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -106,6 +106,9 @@ struct clk_rcg {
extern const struct clk_ops clk_rcg_ops;
extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_bypass2_ops;
+extern const struct clk_ops clk_rcg_pixel_ops;
+extern const struct clk_ops clk_rcg_esc_ops;
extern const struct clk_ops clk_rcg_lcc_ops;
#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
@@ -153,8 +156,8 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @hid_width: number of bits in half integer divider
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
+ * @current_freq: last cached frequency when using branches with shared RCGs
* @clkr: regmap clock handle
- * @lock: register lock
*
*/
struct clk_rcg2 {
@@ -163,14 +166,17 @@ struct clk_rcg2 {
u8 hid_width;
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
struct clk_regmap clkr;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 9aec1761fd29..b544bb302f79 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -300,6 +300,85 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+ /* force enable RCG */
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ ret = clk_rcg2_is_enabled(hw);
+ if (ret)
+ break;
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate);
+ if (ret)
+ return ret;
+
+ /* clear force enable RCG */
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* cache the rate */
+ rcg->current_freq = rate;
+
+ if (!__clk_is_enabled(hw->clk))
+ return 0;
+
+ return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
+}
+
+static unsigned long
+clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* switch to XO, which is the lowest entry in the freq table */
+ clk_rcg2_shared_set_rate(hw, rcg->freq_tbl[0].freq, 0);
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+
struct frac_entry {
int num;
int den;
@@ -485,6 +564,76 @@ const struct clk_ops clk_byte_ops = {
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
static const struct frac_entry frac_table_pixel[] = {
{ 3, 8 },
{ 2, 9 },
@@ -496,14 +645,9 @@ static const struct frac_entry frac_table_pixel[] = {
static int clk_pixel_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
unsigned long request, src_rate;
int delta = 100000;
- const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
-
- req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
for (; frac->num; frac++) {
request = (req->rate * frac->den) / frac->num;
@@ -525,12 +669,23 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- struct freq_tbl f = *rcg->freq_tbl;
+ struct freq_tbl f = { 0 };
const struct frac_entry *frac = frac_table_pixel;
unsigned long request;
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
- u32 hid_div;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
@@ -555,7 +710,6 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index)
{
- /* Parent index is set statically in frequency table */
return clk_pixel_set_rate(hw, rate, parent_rate);
}
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 2dedceefd21d..8fa477293ae0 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -22,6 +22,7 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
+#include "gdsc.h"
struct qcom_cc {
struct qcom_reset_controller reset;
@@ -72,6 +73,21 @@ qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
}
EXPORT_SYMBOL_GPL(qcom_cc_map);
+static void qcom_cc_del_clk_provider(void *data)
+{
+ of_clk_del_provider(data);
+}
+
+static void qcom_cc_reset_unregister(void *data)
+{
+ reset_controller_unregister(data);
+}
+
+static void qcom_cc_gdsc_unregister(void *data)
+{
+ gdsc_unregister(data);
+}
+
int qcom_cc_really_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
@@ -110,6 +126,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
+ devm_add_action(dev, qcom_cc_del_clk_provider, pdev->dev.of_node);
+
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.ops = &qcom_reset_ops;
@@ -117,13 +135,24 @@ int qcom_cc_really_probe(struct platform_device *pdev,
reset->rcdev.nr_resets = desc->num_resets;
reset->regmap = regmap;
reset->reset_map = desc->resets;
- platform_set_drvdata(pdev, &reset->rcdev);
ret = reset_controller_register(&reset->rcdev);
if (ret)
- of_clk_del_provider(dev->of_node);
+ return ret;
+
+ devm_add_action(dev, qcom_cc_reset_unregister, &reset->rcdev);
+
+ if (desc->gdscs && desc->num_gdscs) {
+ ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs,
+ &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ }
+
+ devm_add_action(dev, qcom_cc_gdsc_unregister, dev);
- return ret;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
@@ -139,11 +168,4 @@ int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
}
EXPORT_SYMBOL_GPL(qcom_cc_probe);
-void qcom_cc_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- reset_controller_unregister(platform_get_drvdata(pdev));
-}
-EXPORT_SYMBOL_GPL(qcom_cc_remove);
-
MODULE_LICENSE("GPL v2");
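
As a sketch of the device-managed teardown pattern the common code moves to: each registration in probe is paired with a devm_add_action() undo, so the per-driver .remove callbacks can be dropped. Everything except the devm and clk provider calls below is a placeholder.

static struct clk_onecell_data example_clk_data;

static void example_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
				  &example_clk_data);
	if (ret)
		return ret;

	/* Undone automatically, in reverse order, when the device is unbound,
	 * so the driver no longer needs a .remove callback for this. */
	devm_add_action(dev, example_del_clk_provider, dev->of_node);

	return 0;
}
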
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7a0e73713063..7c1fba3ebc03 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -28,6 +28,8 @@ struct qcom_cc_desc {
size_t num_clks;
const struct qcom_reset_map *resets;
size_t num_resets;
+ struct gdsc **gdscs;
+ size_t num_gdscs;
};
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
@@ -43,6 +45,4 @@ extern int qcom_cc_really_probe(struct platform_device *pdev,
extern int qcom_cc_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc);
-extern void qcom_cc_remove(struct platform_device *pdev);
-
#endif
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 3563019b8e3c..1567c3a79534 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -3254,6 +3255,38 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
},
};
+static struct gdsc usb_hs_hsic_gdsc = {
+ .gdscr = 0x404,
+ .pd = {
+ .name = "usb_hs_hsic",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie0_gdsc = {
+ .gdscr = 0x1ac4,
+ .pd = {
+ .name = "pcie0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie1_gdsc = {
+ .gdscr = 0x1b44,
+ .pd = {
+ .name = "pcie1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x1e84,
+ .pd = {
+ .name = "usb30",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_apq8084_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -3447,6 +3480,13 @@ static struct clk_regmap *gcc_apq8084_clocks[] = {
[GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
};
+static struct gdsc *gcc_apq8084_gdscs[] = {
+ [USB_HS_HSIC_GDSC] = &usb_hs_hsic_gdsc,
+ [PCIE0_GDSC] = &pcie0_gdsc,
+ [PCIE1_GDSC] = &pcie1_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+};
+
static const struct qcom_reset_map gcc_apq8084_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x0100 },
[GCC_CONFIG_NOC_BCR] = { 0x0140 },
@@ -3555,6 +3595,8 @@ static const struct qcom_cc_desc gcc_apq8084_desc = {
.num_clks = ARRAY_SIZE(gcc_apq8084_clocks),
.resets = gcc_apq8084_resets,
.num_resets = ARRAY_SIZE(gcc_apq8084_resets),
+ .gdscs = gcc_apq8084_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_apq8084_gdscs),
};
static const struct of_device_id gcc_apq8084_match_table[] = {
@@ -3581,15 +3623,8 @@ static int gcc_apq8084_probe(struct platform_device *pdev)
return qcom_cc_probe(pdev, &gcc_apq8084_desc);
}
-static int gcc_apq8084_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver gcc_apq8084_driver = {
.probe = gcc_apq8084_probe,
- .remove = gcc_apq8084_remove,
.driver = {
.name = "gcc-apq8084",
.of_match_table = gcc_apq8084_match_table,
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 40e480220cd3..16fc64c082a5 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3058,15 +3058,8 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
return 0;
}
-static int gcc_ipq806x_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver gcc_ipq806x_driver = {
.probe = gcc_ipq806x_probe,
- .remove = gcc_ipq806x_remove,
.driver = {
.name = "gcc-ipq806x",
.of_match_table = gcc_ipq806x_match_table,
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index b02826ed770a..f110bb5a1df3 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2735,15 +2735,8 @@ static int gcc_msm8660_probe(struct platform_device *pdev)
return qcom_cc_probe(pdev, &gcc_msm8660_desc);
}
-static int gcc_msm8660_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver gcc_msm8660_driver = {
.probe = gcc_msm8660_probe,
- .remove = gcc_msm8660_remove,
.driver = {
.name = "gcc-msm8660",
.of_match_table = gcc_msm8660_match_table,
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 22a4e1e732c0..d0a0313d6bef 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -44,6 +45,9 @@ enum {
P_SLEEP_CLK,
P_DSI0_PHYPLL_BYTE,
P_DSI0_PHYPLL_DSI,
+ P_EXT_PRI_I2S,
+ P_EXT_SEC_I2S,
+ P_EXT_MCLK,
};
static const struct parent_map gcc_xo_gpll0_map[] = {
@@ -190,6 +194,76 @@ static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
"gpll2_vote",
};
+static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll1_sleep[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_PRI_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char * const gcc_xo_gpll1_epi2s_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_pri_i2s",
+ "ext_mclk",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_SEC_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char * const gcc_xo_gpll1_esi2s_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_sec_i2s",
+ "ext_mclk",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char * const gcc_xo_sleep[] = {
+ "xo",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_MCLK, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char * const gcc_xo_gpll1_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_mclk",
+ "sleep_clk",
+};
+
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
static struct clk_pll gpll0 = {
@@ -906,21 +980,15 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static struct freq_tbl ftbl_gcc_mdss_byte0_clk[] = {
- { .src = P_DSI0_PHYPLL_BYTE },
- { }
-};
-
static struct clk_rcg2 byte0_clk_src = {
.cmd_rcgr = 0x4d044,
.hid_width = 5,
.parent_map = gcc_xo_gpll0a_dsibyte_map,
- .freq_tbl = ftbl_gcc_mdss_byte0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_names = gcc_xo_gpll0a_dsibyte,
.num_parents = 3,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -968,17 +1036,11 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl ftbl_gcc_mdss_pclk[] = {
- { .src = P_DSI0_PHYPLL_DSI },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0a_dsiphy_map,
- .freq_tbl = ftbl_gcc_mdss_pclk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = gcc_xo_gpll0a_dsiphy,
@@ -1094,6 +1156,30 @@ static struct clk_rcg2 apss_tcu_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266500000, P_BIMC, 4, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(533000000, P_BIMC, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .freq_tbl = ftbl_gcc_bimc_gpu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_gpu_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
F(80000000, P_GPLL0, 10, 0, 0),
{ }
@@ -1112,6 +1198,305 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = {
+ F(3200000, P_XO, 6, 0, 0),
+ F(6400000, P_XO, 3, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
+ .cmd_rcgr = 0x1c010,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll0_gpll1_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_ahbfabric_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
+ .halt_reg = 0x1c028,
+ .clkr = {
+ .enable_reg = 0x1c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_ahbfabric_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
+ .halt_reg = 0x1c024,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_ahbfabric_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = {
+ F(256000, P_XO, 5, 1, 15),
+ F(512000, P_XO, 5, 2, 15),
+ F(705600, P_GPLL1, 16, 1, 80),
+ F(768000, P_XO, 5, 1, 5),
+ F(800000, P_XO, 5, 5, 24),
+ F(1024000, P_GPLL1, 14, 1, 63),
+ F(1152000, P_XO, 1, 3, 50),
+ F(1411200, P_GPLL1, 16, 1, 40),
+ F(1536000, P_XO, 1, 2, 25),
+ F(1600000, P_XO, 12, 0, 0),
+ F(2048000, P_GPLL1, 9, 1, 49),
+ F(2400000, P_XO, 8, 0, 0),
+ F(2822400, P_GPLL1, 16, 1, 20),
+ F(3072000, P_GPLL1, 14, 1, 21),
+ F(4096000, P_GPLL1, 9, 2, 49),
+ F(4800000, P_XO, 4, 0, 0),
+ F(5644800, P_GPLL1, 16, 1, 10),
+ F(6144000, P_GPLL1, 7, 1, 21),
+ F(8192000, P_GPLL1, 9, 4, 49),
+ F(9600000, P_XO, 2, 0, 0),
+ F(11289600, P_GPLL1, 16, 1, 5),
+ F(12288000, P_GPLL1, 7, 2, 21),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
+ .cmd_rcgr = 0x1c054,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_epi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
+ .halt_reg = 0x1c068,
+ .clkr = {
+ .enable_reg = 0x1c068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_pri_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_pri_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
+ .cmd_rcgr = 0x1c06c,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
+ .halt_reg = 0x1c080,
+ .clkr = {
+ .enable_reg = 0x1c080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_sec_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_sec_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
+ .cmd_rcgr = 0x1c084,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
+ .halt_reg = 0x1c098,
+ .clkr = {
+ .enable_reg = 0x1c098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_aux_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_aux_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_xo_clk_src = {
+ .cmd_rcgr = 0x1c034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_xo_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_xo_clk_src",
+ .parent_names = gcc_xo_sleep,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
+ .halt_reg = 0x1c04c,
+ .clkr = {
+ .enable_reg = 0x1c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_avsync_xo_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_stc_xo_clk = {
+ .halt_reg = 0x1c050,
+ .clkr = {
+ .enable_reg = 0x1c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_stc_xo_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_codec_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(11289600, P_EXT_MCLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 codec_digcodec_clk_src = {
+ .cmd_rcgr = 0x1c09c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_codec_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "codec_digcodec_clk_src",
+ .parent_names = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_codec_digcodec_clk = {
+ .halt_reg = 0x1c0b0,
+ .clkr = {
+ .enable_reg = 0x1c0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_codec_digcodec_clk",
+ .parent_names = (const char *[]){
+ "codec_digcodec_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
+ .halt_reg = 0x1c000,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_mport_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_sway_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -2358,6 +2743,51 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
},
};
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_ddr_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gtcu_ahb_clk = {
.halt_reg = 0x12044,
.clkr = {
@@ -2375,6 +2805,40 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
},
};
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .parent_names = (const char *[]){
+ "bimc_gpu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_clk",
+ .parent_names = (const char *[]){
+ "bimc_gpu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_jpeg_tbu_clk = {
.halt_reg = 0x12034,
.clkr = {
@@ -2562,6 +3026,46 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
},
};
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .pd = {
+ .name = "venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .pd = {
+ .name = "jpeg",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .pd = {
+ .name = "vfe",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .pd = {
+ .name = "oxili",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_msm8916_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -2701,6 +3205,36 @@ static struct clk_regmap *gcc_msm8916_clocks[] = {
[GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
[GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
[GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr,
+ [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr,
+ [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr,
+ [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr,
+ [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr,
+ [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr,
+ [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr,
+ [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
+ [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8916_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
};
static const struct qcom_reset_map gcc_msm8916_resets[] = {
@@ -2810,6 +3344,8 @@ static const struct qcom_cc_desc gcc_msm8916_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8916_clocks),
.resets = gcc_msm8916_resets,
.num_resets = ARRAY_SIZE(gcc_msm8916_resets),
+ .gdscs = gcc_msm8916_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8916_gdscs),
};
static const struct of_device_id gcc_msm8916_match_table[] = {
@@ -2836,15 +3372,8 @@ static int gcc_msm8916_probe(struct platform_device *pdev)
return qcom_cc_probe(pdev, &gcc_msm8916_desc);
}
-static int gcc_msm8916_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver gcc_msm8916_driver = {
.probe = gcc_msm8916_probe,
- .remove = gcc_msm8916_remove,
.driver = {
.name = "gcc-msm8916",
.of_match_table = gcc_msm8916_match_table,
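A quick aside on the freq_tbl entries added above: the F() macro packs the target frequency, the source, a pre-divider stored as (2*div - 1), and an M/N pair. As a standalone illustration (not driver code), decoding F(40000000, P_GPLL0, 10, 1, 2) from ftbl_gcc_ultaudio_ahb_clk, and assuming the usual 800 MHz GPLL0:

    #include <stdio.h>

    int main(void)
    {
            unsigned long parent = 800000000;       /* GPLL0 rate, assumed */
            unsigned int pre_div = 10;              /* encoded by F() as 2*10 - 1 = 19 */
            unsigned int m = 1, n = 2;
            unsigned long rate = parent / pre_div * m / n;

            printf("%lu\n", rate);                  /* prints 40000000 */
            return 0;
    }
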
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index aa294b1bad34..66c18bc97857 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3506,6 +3506,8 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
struct clk *clk;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
+ struct platform_device *tsens;
+ int ret;
match = of_match_device(gcc_msm8960_match_table, &pdev->dev);
if (!match)
@@ -3520,12 +3522,26 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- return qcom_cc_probe(pdev, match->data);
+ ret = qcom_cc_probe(pdev, match->data);
+ if (ret)
+ return ret;
+
+ tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
+ NULL, 0);
+ if (IS_ERR(tsens))
+ return PTR_ERR(tsens);
+
+ platform_set_drvdata(pdev, tsens);
+
+ return 0;
}
static int gcc_msm8960_remove(struct platform_device *pdev)
{
- qcom_cc_remove(pdev);
+ struct platform_device *tsens = platform_get_drvdata(pdev);
+
+ platform_device_unregister(tsens);
+
return 0;
}
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 2bcf87538f9d..28abb8f8f293 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -2432,6 +2433,14 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
},
};
+static struct gdsc usb_hs_hsic_gdsc = {
+ .gdscr = 0x404,
+ .pd = {
+ .name = "usb_hs_hsic",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_msm8974_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -2661,6 +2670,10 @@ static const struct qcom_reset_map gcc_msm8974_resets[] = {
[GCC_VENUS_RESTART] = { 0x1740 },
};
+static struct gdsc *gcc_msm8974_gdscs[] = {
+ [USB_HS_HSIC_GDSC] = &usb_hs_hsic_gdsc,
+};
+
static const struct regmap_config gcc_msm8974_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2675,6 +2688,8 @@ static const struct qcom_cc_desc gcc_msm8974_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8974_clocks),
.resets = gcc_msm8974_resets,
.num_resets = ARRAY_SIZE(gcc_msm8974_resets),
+ .gdscs = gcc_msm8974_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8974_gdscs),
};
static const struct of_device_id gcc_msm8974_match_table[] = {
@@ -2729,15 +2744,8 @@ static int gcc_msm8974_probe(struct platform_device *pdev)
return qcom_cc_probe(pdev, &gcc_msm8974_desc);
}
-static int gcc_msm8974_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver gcc_msm8974_driver = {
.probe = gcc_msm8974_probe,
- .remove = gcc_msm8974_remove,
.driver = {
.name = "gcc-msm8974",
.of_match_table = gcc_msm8974_match_table,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
new file mode 100644
index 000000000000..da9fad8b642b
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include "gdsc.h"
+
+#define PWR_ON_MASK BIT(31)
+#define EN_REST_WAIT_MASK GENMASK_ULL(23, 20)
+#define EN_FEW_WAIT_MASK GENMASK_ULL(19, 16)
+#define CLK_DIS_WAIT_MASK GENMASK_ULL(15, 12)
+#define SW_OVERRIDE_MASK BIT(2)
+#define HW_CONTROL_MASK BIT(1)
+#define SW_COLLAPSE_MASK BIT(0)
+
+/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
+#define EN_REST_WAIT_VAL (0x2 << 20)
+#define EN_FEW_WAIT_VAL (0x8 << 16)
+#define CLK_DIS_WAIT_VAL (0x2 << 12)
+
+#define RETAIN_MEM BIT(14)
+#define RETAIN_PERIPH BIT(13)
+
+#define TIMEOUT_US 100
+
+#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
+
+static int gdsc_is_enabled(struct gdsc *sc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & PWR_ON_MASK);
+}
+
+static int gdsc_toggle_logic(struct gdsc *sc, bool en)
+{
+ int ret;
+ u32 val = en ? 0 : SW_COLLAPSE_MASK;
+ u32 check = en ? PWR_ON_MASK : 0;
+ unsigned long timeout;
+
+ ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
+ if (ret)
+ return ret;
+
+ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+ do {
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ if ((val & PWR_ON_MASK) == check)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ if ((val & PWR_ON_MASK) == check)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static inline int gdsc_deassert_reset(struct gdsc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->reset_count; i++)
+ sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
+ return 0;
+}
+
+static inline int gdsc_assert_reset(struct gdsc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->reset_count; i++)
+ sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
+ return 0;
+}
+
+static inline void gdsc_force_mem_on(struct gdsc *sc)
+{
+ int i;
+ u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+
+ for (i = 0; i < sc->cxc_count; i++)
+ regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
+}
+
+static inline void gdsc_clear_mem_on(struct gdsc *sc)
+{
+ int i;
+ u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+
+ for (i = 0; i < sc->cxc_count; i++)
+ regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
+}
+
+static int gdsc_enable(struct generic_pm_domain *domain)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
+
+ if (sc->pwrsts == PWRSTS_ON)
+ return gdsc_deassert_reset(sc);
+
+ ret = gdsc_toggle_logic(sc, true);
+ if (ret)
+ return ret;
+
+ if (sc->pwrsts & PWRSTS_OFF)
+ gdsc_force_mem_on(sc);
+
+ /*
+ * If clocks to this power domain were already on, they will take an
+ * additional 4 clock cycles to re-enable after the power domain is
+ * enabled. Delay to account for this. A delay is also needed to ensure
+ * clocks are not enabled within 400ns of enabling power to the
+ * memories.
+ */
+ udelay(1);
+
+ return 0;
+}
+
+static int gdsc_disable(struct generic_pm_domain *domain)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+
+ if (sc->pwrsts == PWRSTS_ON)
+ return gdsc_assert_reset(sc);
+
+ if (sc->pwrsts & PWRSTS_OFF)
+ gdsc_clear_mem_on(sc);
+
+ return gdsc_toggle_logic(sc, false);
+}
+
+static int gdsc_init(struct gdsc *sc)
+{
+ u32 mask, val;
+ int on, ret;
+
+ /*
+ * Disable HW trigger: collapse/restore occur based on register writes.
+ * Disable SW override: Use hardware state-machine for sequencing.
+ * Configure wait time between states.
+ */
+ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
+ EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
+ val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+ ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
+ if (ret)
+ return ret;
+
+ /* Force gdsc ON if only ON state is supported */
+ if (sc->pwrsts == PWRSTS_ON) {
+ ret = gdsc_toggle_logic(sc, true);
+ if (ret)
+ return ret;
+ }
+
+ on = gdsc_is_enabled(sc);
+ if (on < 0)
+ return on;
+
+ if (on || (sc->pwrsts & PWRSTS_RET))
+ gdsc_force_mem_on(sc);
+ else
+ gdsc_clear_mem_on(sc);
+
+ sc->pd.power_off = gdsc_disable;
+ sc->pd.power_on = gdsc_enable;
+ pm_genpd_init(&sc->pd, NULL, !on);
+
+ return 0;
+}
+
+int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+ struct reset_controller_dev *rcdev, struct regmap *regmap)
+{
+ int i, ret;
+ struct genpd_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num;
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ scs[i]->regmap = regmap;
+ scs[i]->rcdev = rcdev;
+ ret = gdsc_init(scs[i]);
+ if (ret)
+ return ret;
+ data->domains[i] = &scs[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(dev->of_node, data);
+}
+
+void gdsc_unregister(struct device *dev)
+{
+ of_genpd_del_provider(dev->of_node);
+}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
new file mode 100644
index 000000000000..5ded26884f08
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_GDSC_H__
+#define __QCOM_GDSC_H__
+
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+
+struct regmap;
+struct reset_controller_dev;
+
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+
+/**
+ * struct gdsc - Globally Distributed Switch Controller
+ * @pd: generic power domain
+ * @regmap: regmap for MMIO accesses
+ * @gdscr: gdsc control register
+ * @cxcs: offsets of branch registers to toggle mem/periph bits in
+ * @cxc_count: number of @cxcs
+ * @pwrsts: Possible powerdomain power states
+ * @resets: ids of resets associated with this gdsc
+ * @reset_count: number of @resets
+ * @rcdev: reset controller
+ */
+struct gdsc {
+ struct generic_pm_domain pd;
+ struct regmap *regmap;
+ unsigned int gdscr;
+ unsigned int *cxcs;
+ unsigned int cxc_count;
+ const u8 pwrsts;
+ struct reset_controller_dev *rcdev;
+ unsigned int *resets;
+ unsigned int reset_count;
+};
+
+#ifdef CONFIG_QCOM_GDSC
+int gdsc_register(struct device *, struct gdsc **, size_t n,
+ struct reset_controller_dev *, struct regmap *);
+void gdsc_unregister(struct device *);
+#else
+static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+ struct reset_controller_dev *rcdev,
+ struct regmap *r)
+{
+ return -ENOSYS;
+}
+
+static inline void gdsc_unregister(struct device *d) { }
+#endif /* CONFIG_QCOM_GDSC */
+#endif /* __QCOM_GDSC_H__ */
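To tie the two new files together: below is a minimal sketch of how a controller driver is expected to describe a power domain and hand it to the common code, following the gcc-msm8916 hunks above (the qcom_cc_desc .gdscs/.num_gdscs plumbing is assumed to invoke gdsc_register() from the common probe path).

    /* Sketch only -- mirrors the pattern used by gcc-msm8916 above. */
    static struct gdsc venus_gdsc = {
            .gdscr = 0x4c018,
            .pd = {
                    .name = "venus",
            },
            .pwrsts = PWRSTS_OFF_ON,        /* may be fully collapsed or on */
    };

    static struct gdsc *gcc_example_gdscs[] = {
            [VENUS_GDSC] = &venus_gdsc,
    };

    static const struct qcom_cc_desc gcc_example_desc = {
            /* ... .clks, .resets as before ... */
            .gdscs = gcc_example_gdscs,
            .num_gdscs = ARRAY_SIZE(gcc_example_gdscs),
    };

    /*
     * gdsc_register() then runs gdsc_init() on each entry, wires
     * power_on/power_off to gdsc_enable()/gdsc_disable(), and publishes
     * the domains via of_genpd_add_provider_onecell() so consumers can
     * reference them from DT.
     */
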
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 93ad42b14366..db3998e5e2d8 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -452,15 +452,8 @@ static int lcc_ipq806x_probe(struct platform_device *pdev)
return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
}
-static int lcc_ipq806x_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver lcc_ipq806x_driver = {
.probe = lcc_ipq806x_probe,
- .remove = lcc_ipq806x_remove,
.driver = {
.name = "lcc-ipq806x",
.of_match_table = lcc_ipq806x_match_table,
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index ecb96c284675..4fcf9d1d233c 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -565,15 +565,8 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
}
-static int lcc_msm8960_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver lcc_msm8960_driver = {
.probe = lcc_msm8960_probe,
- .remove = lcc_msm8960_remove,
.driver = {
.name = "lcc-msm8960",
.of_match_table = lcc_msm8960_match_table,
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index f0ee6bde11af..30777f9f1a43 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -26,6 +26,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -571,17 +572,11 @@ static struct clk_rcg2 jpeg2_clk_src = {
},
};
-static struct freq_tbl pixel_freq_tbl[] = {
- { .src = P_DSI0PLL },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x2000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -596,7 +591,6 @@ static struct clk_rcg2 pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -844,21 +838,15 @@ static struct clk_rcg2 cpp_clk_src = {
},
};
-static struct freq_tbl byte_freq_tbl[] = {
- { .src = P_DSI0PLL_BYTE },
- { }
-};
-
static struct clk_rcg2 byte0_clk_src = {
.cmd_rcgr = 0x2120,
.hid_width = 5,
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
- .freq_tbl = byte_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -867,12 +855,11 @@ static struct clk_rcg2 byte1_clk_src = {
.cmd_rcgr = 0x2140,
.hid_width = 5,
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
- .freq_tbl = byte_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3077,6 +3064,76 @@ static const struct pll_config mmpll3_config = {
.aux_output_mask = BIT(1),
};
+static struct gdsc venus0_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus0_core0_gdsc = {
+ .gdscr = 0x1040,
+ .pd = {
+ .name = "venus0_core0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus0_core1_gdsc = {
+ .gdscr = 0x1044,
+ .pd = {
+ .name = "venus0_core1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .cxcs = (unsigned int []){ 0x231c, 0x2320 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .pd = {
+ .name = "camss_jpeg",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .cxcs = (unsigned int []){ 0x36a8, 0x36ac, 0x36b0 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "camss_vfe",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x4024,
+ .cxcs = (unsigned int []){ 0x4028 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxilicx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxilicx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *mmcc_apq8084_clocks[] = {
[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -3294,6 +3351,17 @@ static const struct qcom_reset_map mmcc_apq8084_resets[] = {
[MMSSNOCAXI_RESET] = { 0x5060 },
};
+static struct gdsc *mmcc_apq8084_gdscs[] = {
+ [VENUS0_GDSC] = &venus0_gdsc,
+ [VENUS0_CORE0_GDSC] = &venus0_core0_gdsc,
+ [VENUS0_CORE1_GDSC] = &venus0_core1_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+ [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [OXILICX_GDSC] = &oxilicx_gdsc,
+};
+
static const struct regmap_config mmcc_apq8084_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3308,6 +3376,8 @@ static const struct qcom_cc_desc mmcc_apq8084_desc = {
.num_clks = ARRAY_SIZE(mmcc_apq8084_clocks),
.resets = mmcc_apq8084_resets,
.num_resets = ARRAY_SIZE(mmcc_apq8084_resets),
+ .gdscs = mmcc_apq8084_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_apq8084_gdscs),
};
static const struct of_device_id mmcc_apq8084_match_table[] = {
@@ -3332,15 +3402,8 @@ static int mmcc_apq8084_probe(struct platform_device *pdev)
return 0;
}
-static int mmcc_apq8084_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver mmcc_apq8084_driver = {
.probe = mmcc_apq8084_probe,
- .remove = mmcc_apq8084_remove,
.driver = {
.name = "mmcc-apq8084",
.of_match_table = mmcc_apq8084_match_table,
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index bad02aebf959..00e36192a1de 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -41,6 +41,10 @@ enum {
P_PLL3,
P_PLL15,
P_HDMI_PLL,
+ P_DSI1_PLL_DSICLK,
+ P_DSI2_PLL_DSICLK,
+ P_DSI1_PLL_BYTECLK,
+ P_DSI2_PLL_BYTECLK,
};
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
@@ -85,6 +89,30 @@ static const char * const mmcc_pxo_pll8_pll2_pll3[] = {
"pll3",
};
+static const struct parent_map mmcc_pxo_dsi2_dsi1_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const char * const mmcc_pxo_dsi2_dsi1[] = {
+ "pxo",
+ "dsi2pll",
+ "dsi1pll",
+};
+
+static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
+ { P_PXO, 0 },
+ { P_DSI1_PLL_BYTECLK, 1 },
+ { P_DSI2_PLL_BYTECLK, 2 },
+};
+
+static const char * const mmcc_pxo_dsi1_dsi2_byte[] = {
+ "pxo",
+ "dsi1pllbyte",
+ "dsi2pllbyte",
+};
+
static struct clk_pll pll2 = {
.l_reg = 0x320,
.m_reg = 0x324,
@@ -2042,6 +2070,350 @@ static struct clk_branch dsi2_s_ahb_clk = {
},
};
+static struct clk_rcg dsi1_src = {
+ .ns_reg = 0x0054,
+ .md_reg = 0x0050,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ },
+ .clkr = {
+ .enable_reg = 0x004c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_src",
+ .parent_names = mmcc_pxo_dsi2_dsi1,
+ .num_parents = 3,
+ .ops = &clk_rcg_bypass2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch dsi1_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_clk",
+ .parent_names = (const char *[]){ "dsi1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi2_src = {
+ .ns_reg = 0x012c,
+ .md_reg = 0x00a8,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ },
+ .clkr = {
+ .enable_reg = 0x003c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_src",
+ .parent_names = mmcc_pxo_dsi2_dsi1,
+ .num_parents = 3,
+ .ops = &clk_rcg_bypass2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch dsi2_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x003c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_clk",
+ .parent_names = (const char *[]){ "dsi2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi1_byte_src = {
+ .ns_reg = 0x00b0,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi1_dsi2_byte_map,
+ },
+ .clkr = {
+ .enable_reg = 0x0090,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_byte_src",
+ .parent_names = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = 3,
+ .ops = &clk_rcg_bypass2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch dsi1_byte_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x0090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_byte_clk",
+ .parent_names = (const char *[]){ "dsi1_byte_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi2_byte_src = {
+ .ns_reg = 0x012c,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi1_dsi2_byte_map,
+ },
+ .clkr = {
+ .enable_reg = 0x0130,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_byte_src",
+ .parent_names = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = 3,
+ .ops = &clk_rcg_bypass2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch dsi2_byte_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x00b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_byte_clk",
+ .parent_names = (const char *[]){ "dsi2_byte_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi1_esc_src = {
+ .ns_reg = 0x0011c,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi1_dsi2_byte_map,
+ },
+ .clkr = {
+ .enable_reg = 0x00cc,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_esc_src",
+ .parent_names = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = 3,
+ .ops = &clk_rcg_esc_ops,
+ },
+ },
+};
+
+static struct clk_branch dsi1_esc_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x00cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_esc_clk",
+ .parent_names = (const char *[]){ "dsi1_esc_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi2_esc_src = {
+ .ns_reg = 0x0150,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi1_dsi2_byte_map,
+ },
+ .clkr = {
+ .enable_reg = 0x013c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_esc_src",
+ .parent_names = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = 3,
+ .ops = &clk_rcg_esc_ops,
+ },
+ },
+};
+
+static struct clk_branch dsi2_esc_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x013c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_esc_clk",
+ .parent_names = (const char *[]){ "dsi2_esc_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi1_pixel_src = {
+ .ns_reg = 0x0138,
+ .md_reg = 0x0134,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ },
+ .clkr = {
+ .enable_reg = 0x0130,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_pixel_src",
+ .parent_names = mmcc_pxo_dsi2_dsi1,
+ .num_parents = 3,
+ .ops = &clk_rcg_pixel_ops,
+ },
+ },
+};
+
+static struct clk_branch dsi1_pixel_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0130,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_pclk1_clk",
+ .parent_names = (const char *[]){ "dsi1_pixel_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg dsi2_pixel_src = {
+ .ns_reg = 0x00e4,
+ .md_reg = 0x00b8,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ },
+ .clkr = {
+ .enable_reg = 0x0094,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_pixel_src",
+ .parent_names = mmcc_pxo_dsi2_dsi1,
+ .num_parents = 3,
+ .ops = &clk_rcg_pixel_ops,
+ },
+ },
+};
+
+static struct clk_branch dsi2_pixel_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x0094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_pclk2_clk",
+ .parent_names = (const char *[]){ "dsi2_pixel_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gfx2d0_ahb_clk = {
.hwcg_reg = 0x0038,
.hwcg_bit = 28,
@@ -2325,6 +2697,8 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[CSI2_SRC] = &csi2_src.clkr,
[CSI2_CLK] = &csi2_clk.clkr,
[CSI2_PHY_CLK] = &csi2_phy_clk.clkr,
+ [DSI_SRC] = &dsi1_src.clkr,
+ [DSI_CLK] = &dsi1_clk.clkr,
[CSI_PIX_CLK] = &csi_pix_clk.clkr,
[CSI_RDI_CLK] = &csi_rdi_clk.clkr,
[MDP_VSYNC_CLK] = &mdp_vsync_clk.clkr,
@@ -2345,6 +2719,18 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[MDP_SRC] = &mdp_src.clkr,
[MDP_CLK] = &mdp_clk.clkr,
[MDP_LUT_CLK] = &mdp_lut_clk.clkr,
+ [DSI2_PIXEL_SRC] = &dsi2_pixel_src.clkr,
+ [DSI2_PIXEL_CLK] = &dsi2_pixel_clk.clkr,
+ [DSI2_SRC] = &dsi2_src.clkr,
+ [DSI2_CLK] = &dsi2_clk.clkr,
+ [DSI1_BYTE_SRC] = &dsi1_byte_src.clkr,
+ [DSI1_BYTE_CLK] = &dsi1_byte_clk.clkr,
+ [DSI2_BYTE_SRC] = &dsi2_byte_src.clkr,
+ [DSI2_BYTE_CLK] = &dsi2_byte_clk.clkr,
+ [DSI1_ESC_SRC] = &dsi1_esc_src.clkr,
+ [DSI1_ESC_CLK] = &dsi1_esc_clk.clkr,
+ [DSI2_ESC_SRC] = &dsi2_esc_src.clkr,
+ [DSI2_ESC_CLK] = &dsi2_esc_clk.clkr,
[ROT_SRC] = &rot_src.clkr,
[ROT_CLK] = &rot_clk.clkr,
[TV_ENC_CLK] = &tv_enc_clk.clkr,
@@ -2359,6 +2745,8 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[VFE_CSI_CLK] = &vfe_csi_clk.clkr,
[VPE_SRC] = &vpe_src.clkr,
[VPE_CLK] = &vpe_clk.clkr,
+ [DSI_PIXEL_SRC] = &dsi1_pixel_src.clkr,
+ [DSI_PIXEL_CLK] = &dsi1_pixel_clk.clkr,
[CAMCLK0_SRC] = &camclk0_src.clkr,
[CAMCLK0_CLK] = &camclk0_clk.clkr,
[CAMCLK1_SRC] = &camclk1_src.clkr,
@@ -2490,6 +2878,8 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[CSI2_SRC] = &csi2_src.clkr,
[CSI2_CLK] = &csi2_clk.clkr,
[CSI2_PHY_CLK] = &csi2_phy_clk.clkr,
+ [DSI_SRC] = &dsi1_src.clkr,
+ [DSI_CLK] = &dsi1_clk.clkr,
[CSI_PIX_CLK] = &csi_pix_clk.clkr,
[CSI_RDI_CLK] = &csi_rdi_clk.clkr,
[MDP_VSYNC_CLK] = &mdp_vsync_clk.clkr,
@@ -2506,6 +2896,18 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[MDP_SRC] = &mdp_src.clkr,
[MDP_CLK] = &mdp_clk.clkr,
[MDP_LUT_CLK] = &mdp_lut_clk.clkr,
+ [DSI2_PIXEL_SRC] = &dsi2_pixel_src.clkr,
+ [DSI2_PIXEL_CLK] = &dsi2_pixel_clk.clkr,
+ [DSI2_SRC] = &dsi2_src.clkr,
+ [DSI2_CLK] = &dsi2_clk.clkr,
+ [DSI1_BYTE_SRC] = &dsi1_byte_src.clkr,
+ [DSI1_BYTE_CLK] = &dsi1_byte_clk.clkr,
+ [DSI2_BYTE_SRC] = &dsi2_byte_src.clkr,
+ [DSI2_BYTE_CLK] = &dsi2_byte_clk.clkr,
+ [DSI1_ESC_SRC] = &dsi1_esc_src.clkr,
+ [DSI1_ESC_CLK] = &dsi1_esc_clk.clkr,
+ [DSI2_ESC_SRC] = &dsi2_esc_src.clkr,
+ [DSI2_ESC_CLK] = &dsi2_esc_clk.clkr,
[ROT_SRC] = &rot_src.clkr,
[ROT_CLK] = &rot_clk.clkr,
[TV_DAC_CLK] = &tv_dac_clk.clkr,
@@ -2519,6 +2921,8 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[VFE_CSI_CLK] = &vfe_csi_clk.clkr,
[VPE_SRC] = &vpe_src.clkr,
[VPE_CLK] = &vpe_clk.clkr,
+ [DSI_PIXEL_SRC] = &dsi1_pixel_src.clkr,
+ [DSI_PIXEL_CLK] = &dsi1_pixel_clk.clkr,
[CAMCLK0_SRC] = &camclk0_src.clkr,
[CAMCLK0_CLK] = &camclk0_clk.clkr,
[CAMCLK1_SRC] = &camclk1_src.clkr,
@@ -2686,15 +3090,8 @@ static int mmcc_msm8960_probe(struct platform_device *pdev)
return qcom_cc_really_probe(pdev, match->data, regmap);
}
-static int mmcc_msm8960_remove(struct platform_device *pdev)
-{
- qcom_cc_remove(pdev);
- return 0;
-}
-
static struct platform_driver mmcc_msm8960_driver = {
.probe = mmcc_msm8960_probe,
- .remove = mmcc_msm8960_remove,
.driver = {
.name = "mmcc-msm8960",
.of_match_table = mmcc_msm8960_match_table,
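For readers new to the qcom mux tables added above: parent_names lists the clock names in framework order, while the matching *_map entries give the hardware src_sel value for each source. A standalone illustration follows; the struct layout is assumed to match struct parent_map { src, cfg } in clk-rcg.h.

    #include <stdio.h>

    enum { P_PXO, P_DSI2_PLL_DSICLK, P_DSI1_PLL_DSICLK };

    struct parent_map { unsigned char src; unsigned char cfg; };

    /* Same data as mmcc_pxo_dsi2_dsi1_map above. */
    static const struct parent_map map[] = {
            { P_PXO, 0 },
            { P_DSI2_PLL_DSICLK, 1 },
            { P_DSI1_PLL_DSICLK, 3 },
    };

    int main(void)
    {
            /* Picking "dsi1pll" (index 2 of mmcc_pxo_dsi2_dsi1) programs src_sel = 3. */
            printf("src_sel = %u\n", map[2].cfg);
            return 0;
    }
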
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 0987bf443e1f..9d790bcadf25 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -522,17 +523,11 @@ static struct clk_rcg2 jpeg2_clk_src = {
},
};
-static struct freq_tbl pixel_freq_tbl[] = {
- { .src = P_DSI0PLL },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x2000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -547,7 +542,6 @@ static struct clk_rcg2 pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -785,7 +779,7 @@ static struct clk_rcg2 byte0_clk_src = {
.name = "byte0_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -799,7 +793,7 @@ static struct clk_rcg2 byte1_clk_src = {
.name = "byte1_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2349,6 +2343,66 @@ static struct pll_config mmpll3_config = {
.aux_output_mask = BIT(1),
};
+static struct gdsc venus0_gdsc = {
+ .gdscr = 0x1024,
+ .cxcs = (unsigned int []){ 0x1028 },
+ .cxc_count = 1,
+ .resets = (unsigned int []){ VENUS0_RESET },
+ .reset_count = 1,
+ .pd = {
+ .name = "venus0",
+ },
+ .pwrsts = PWRSTS_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .cxcs = (unsigned int []){ 0x231c, 0x2320 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+};
+
+static struct gdsc camss_jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .cxcs = (unsigned int []){ 0x35a8, 0x35ac, 0x35b0 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "camss_jpeg",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .cxcs = (unsigned int []){ 0x36a8, 0x36ac, 0x3704, 0x3714, 0x36b0 },
+ .cxc_count = 5,
+ .pd = {
+ .name = "camss_vfe",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x4024,
+ .cxcs = (unsigned int []){ 0x4028 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxilicx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxilicx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *mmcc_msm8974_clocks[] = {
[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -2525,6 +2579,15 @@ static const struct qcom_reset_map mmcc_msm8974_resets[] = {
[OCMEMNOC_RESET] = { 0x50b0 },
};
+static struct gdsc *mmcc_msm8974_gdscs[] = {
+ [VENUS0_GDSC] = &venus0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+ [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [OXILICX_GDSC] = &oxilicx_gdsc,
+};
+
static const struct regmap_config mmcc_msm8974_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2539,6 +2602,8 @@ static const struct qcom_cc_desc mmcc_msm8974_desc = {
.num_clks = ARRAY_SIZE(mmcc_msm8974_clocks),
.resets = mmcc_msm8974_resets,
.num_resets = ARRAY_SIZE(mmcc_msm8974_resets),
+ .gdscs = mmcc_msm8974_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8974_gdscs),
};
static const struct of_device_id mmcc_msm8974_match_table[] = {
@@ -2550,6 +2615,7 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
static int mmcc_msm8974_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc);
if (IS_ERR(regmap))
@@ -2558,12 +2624,16 @@ static int mmcc_msm8974_probe(struct platform_device *pdev)
clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
- return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
+ if (ret)
+ return ret;
+
+ return pm_genpd_add_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
}
static int mmcc_msm8974_remove(struct platform_device *pdev)
{
- qcom_cc_remove(pdev);
+ pm_genpd_remove_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
return 0;
}
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index bc24e5a002e7..2685644826a0 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -41,6 +41,8 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
+#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
+#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
#define PSECS_PER_SEC 1000000000000LL
@@ -159,6 +161,15 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->reg = reg;
mmc_clock->shift = shift;
+ /*
+ * Assert init_state to soft reset the CLKGEN
+ * for mmc tuning phase and degree
+ */
+ if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
+ writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
+ ROCKCHIP_MMC_INIT_STATE_RESET,
+ mmc_clock->shift), mmc_clock->reg);
+
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
goto err_free;
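The init_state write added above relies on the Rockchip "hiword mask" register convention: the upper 16 bits of the written word select which of the lower 16 bits actually take effect. Assuming HIWORD_UPDATE keeps its usual definition from the Rockchip clk headers, the value written for shift 1 works out as follows (standalone illustration):

    #include <stdio.h>

    /* Assumed to match the Rockchip clk driver's macro. */
    #define HIWORD_UPDATE(val, mask, shift) \
                    ((val) << (shift) | (mask) << ((shift) + 16))

    int main(void)
    {
            /* ROCKCHIP_MMC_INIT_STATE_RESET = 0x1, shift = 1 */
            unsigned int word = HIWORD_UPDATE(0x1, 0x1, 1);

            printf("0x%08x\n", word);       /* 0x00020002: bit 1 set, bit 17 enables the write */
            return 0;
    }
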
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 7737a1df1e4b..4881eb8a1576 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -126,11 +126,32 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
#define RK3066_PLLCON3_PWRDOWN (1 << 1)
#define RK3066_PLLCON3_BYPASS (1 << 0)
+static void rockchip_rk3066_pll_get_params(struct rockchip_clk_pll *pll,
+ struct rockchip_pll_rate_table *rate)
+{
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+ rate->nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT)
+ & RK3066_PLLCON0_NR_MASK) + 1;
+ rate->no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT)
+ & RK3066_PLLCON0_OD_MASK) + 1;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+ rate->nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT)
+ & RK3066_PLLCON1_NF_MASK) + 1;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
+ rate->nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT)
+ & RK3066_PLLCON2_NB_MASK) + 1;
+}
+
static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
- u64 nf, nr, no, rate64 = prate;
+ struct rockchip_pll_rate_table cur;
+ u64 rate64 = prate;
u32 pllcon;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
@@ -140,53 +161,31 @@ static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
return prate;
}
- pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
- nf = (pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK;
-
- pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
- nr = (pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK;
- no = (pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK;
+ rockchip_rk3066_pll_get_params(pll, &cur);
- rate64 *= (nf + 1);
- do_div(rate64, nr + 1);
- do_div(rate64, no + 1);
+ rate64 *= cur.nf;
+ do_div(rate64, cur.nr);
+ do_div(rate64, cur.no);
return (unsigned long)rate64;
}
-static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long prate)
+static int rockchip_rk3066_pll_set_params(struct rockchip_clk_pll *pll,
+ const struct rockchip_pll_rate_table *rate)
{
- struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
- const struct rockchip_pll_rate_table *rate;
- unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
- struct regmap *grf = rockchip_clk_get_grf();
- struct clk_mux *pll_mux = &pll->pll_mux;
const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+ struct clk_mux *pll_mux = &pll->pll_mux;
+ struct rockchip_pll_rate_table cur;
int rate_change_remuxed = 0;
int cur_parent;
int ret;
- if (IS_ERR(grf)) {
- pr_debug("%s: grf regmap not available, aborting rate change\n",
- __func__);
- return PTR_ERR(grf);
- }
-
- pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
- __func__, clk_hw_get_name(hw), old_rate, drate, prate);
-
- /* Get required rate settings from table */
- rate = rockchip_get_pll_settings(pll, drate);
- if (!rate) {
- pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, clk_hw_get_name(hw));
- return -EINVAL;
- }
-
pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
__func__, rate->rate, rate->nr, rate->no, rate->nf);
+ rockchip_rk3066_pll_get_params(pll, &cur);
+ cur.rate = 0;
+
cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
if (cur_parent == PLL_MODE_NORM) {
pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
@@ -219,9 +218,9 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
/* wait for the pll to lock */
ret = rockchip_pll_wait_lock(pll);
if (ret) {
- pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
- __func__, old_rate);
- rockchip_rk3066_pll_set_rate(hw, old_rate, prate);
+ pr_warn("%s: pll update unsucessful, trying to restore old params\n",
+ __func__);
+ rockchip_rk3066_pll_set_params(pll, &cur);
}
if (rate_change_remuxed)
@@ -230,6 +229,34 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
return ret;
}
+static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+ unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
+ struct regmap *grf = rockchip_clk_get_grf();
+
+ if (IS_ERR(grf)) {
+ pr_debug("%s: grf regmap not available, aborting rate change\n",
+ __func__);
+ return PTR_ERR(grf);
+ }
+
+ pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
+ __func__, clk_hw_get_name(hw), old_rate, drate, prate);
+
+ /* Get required rate settings from table */
+ rate = rockchip_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ return rockchip_rk3066_pll_set_params(pll, rate);
+}
+
static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
@@ -261,9 +288,8 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
- unsigned int nf, nr, no, nb;
+ struct rockchip_pll_rate_table cur;
unsigned long drate;
- u32 pllcon;
if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
return;
@@ -275,34 +301,21 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
if (!rate)
return;
- pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
- nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK) + 1;
- no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK) + 1;
-
- pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
- nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
-
- pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
- nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT) & RK3066_PLLCON2_NB_MASK) + 1;
+ rockchip_rk3066_pll_get_params(pll, &cur);
pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), nb(%d:%d)\n",
- __func__, clk_hw_get_name(hw), drate, rate->nr, nr,
- rate->no, no, rate->nf, nf, rate->nb, nb);
- if (rate->nr != nr || rate->no != no || rate->nf != nf
- || rate->nb != nb) {
- struct clk_hw *parent = clk_hw_get_parent(hw);
- unsigned long prate;
-
- if (!parent) {
- pr_warn("%s: parent of %s not available\n",
- __func__, clk_hw_get_name(hw));
+ __func__, clk_hw_get_name(hw), drate, rate->nr, cur.nr,
+ rate->no, cur.no, rate->nf, cur.nf, rate->nb, cur.nb);
+ if (rate->nr != cur.nr || rate->no != cur.no || rate->nf != cur.nf
+ || rate->nb != cur.nb) {
+ struct regmap *grf = rockchip_clk_get_grf();
+
+ if (IS_ERR(grf))
return;
- }
pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
__func__, clk_hw_get_name(hw));
- prate = clk_hw_get_rate(parent);
- rockchip_rk3066_pll_set_rate(hw, drate, prate);
+ rockchip_rk3066_pll_set_params(pll, rate);
}
}
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 24938815655f..be6c7fd8315d 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -135,9 +135,11 @@ static struct clk *rockchip_clk_register_frac_branch(const char *name,
div->flags = div_flags;
div->reg = base + muxdiv_offset;
div->mshift = 16;
- div->mmask = 0xffff0000;
+ div->mwidth = 16;
+ div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
div->nshift = 0;
- div->nmask = 0xffff;
+ div->nwidth = 16;
+ div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
div->lock = lock;
div_ops = &clk_fractional_divider_ops;
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 8524e667097e..55f8e2e24ab8 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -32,39 +32,41 @@
#define DIV_TOPC0 0x0600
#define DIV_TOPC1 0x0604
#define DIV_TOPC3 0x060C
+#define ENABLE_ACLK_TOPC0 0x0800
#define ENABLE_ACLK_TOPC1 0x0804
+#define ENABLE_SCLK_TOPC1 0x0A04
static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
- FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_topc_bus0_pll", 1, 2, 0),
FFACTOR(0, "ffac_topc_bus0_pll_div4",
"ffac_topc_bus0_pll_div2", 1, 2, 0),
- FFACTOR(0, "ffac_topc_bus1_pll_div2", "mout_bus1_pll_ctrl", 1, 2, 0),
- FFACTOR(0, "ffac_topc_cc_pll_div2", "mout_cc_pll_ctrl", 1, 2, 0),
- FFACTOR(0, "ffac_topc_mfc_pll_div2", "mout_mfc_pll_ctrl", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_bus1_pll_div2", "mout_topc_bus1_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_cc_pll_div2", "mout_topc_cc_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_mfc_pll_div2", "mout_topc_mfc_pll", 1, 2, 0),
};
/* List of parent clocks for Muxes in CMU_TOPC */
-PNAME(mout_aud_pll_ctrl_p) = { "fin_pll", "fout_aud_pll" };
-PNAME(mout_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" };
-PNAME(mout_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" };
-PNAME(mout_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" };
-PNAME(mout_mfc_pll_ctrl_p) = { "fin_pll", "fout_mfc_pll" };
+PNAME(mout_topc_aud_pll_ctrl_p) = { "fin_pll", "fout_aud_pll" };
+PNAME(mout_topc_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" };
+PNAME(mout_topc_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" };
+PNAME(mout_topc_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" };
+PNAME(mout_topc_mfc_pll_ctrl_p) = { "fin_pll", "fout_mfc_pll" };
-PNAME(mout_topc_group2) = { "mout_sclk_bus0_pll_cmuc",
- "mout_sclk_bus1_pll_cmuc", "mout_sclk_cc_pll_cmuc",
- "mout_sclk_mfc_pll_cmuc" };
+PNAME(mout_topc_group2) = { "mout_topc_bus0_pll_half",
+ "mout_topc_bus1_pll_half", "mout_topc_cc_pll_half",
+ "mout_topc_mfc_pll_half" };
-PNAME(mout_sclk_bus0_pll_cmuc_p) = { "mout_bus0_pll_ctrl",
+PNAME(mout_topc_bus0_pll_half_p) = { "mout_topc_bus0_pll",
"ffac_topc_bus0_pll_div2", "ffac_topc_bus0_pll_div4"};
-PNAME(mout_sclk_bus1_pll_cmuc_p) = { "mout_bus1_pll_ctrl",
+PNAME(mout_topc_bus1_pll_half_p) = { "mout_topc_bus1_pll",
"ffac_topc_bus1_pll_div2"};
-PNAME(mout_sclk_cc_pll_cmuc_p) = { "mout_cc_pll_ctrl",
+PNAME(mout_topc_cc_pll_half_p) = { "mout_topc_cc_pll",
"ffac_topc_cc_pll_div2"};
-PNAME(mout_sclk_mfc_pll_cmuc_p) = { "mout_mfc_pll_ctrl",
+PNAME(mout_topc_mfc_pll_half_p) = { "mout_topc_mfc_pll",
"ffac_topc_mfc_pll_div2"};
-PNAME(mout_sclk_bus0_pll_out_p) = {"mout_bus0_pll_ctrl",
+PNAME(mout_topc_bus0_pll_out_p) = {"mout_topc_bus0_pll",
"ffac_topc_bus0_pll_div2"};
static unsigned long topc_clk_regs[] __initdata = {
@@ -88,23 +90,27 @@ static unsigned long topc_clk_regs[] __initdata = {
};
static struct samsung_mux_clock topc_mux_clks[] __initdata = {
- MUX(0, "mout_bus0_pll_ctrl", mout_bus0_pll_ctrl_p, MUX_SEL_TOPC0, 0, 1),
- MUX(0, "mout_bus1_pll_ctrl", mout_bus1_pll_ctrl_p, MUX_SEL_TOPC0, 4, 1),
- MUX(0, "mout_cc_pll_ctrl", mout_cc_pll_ctrl_p, MUX_SEL_TOPC0, 8, 1),
- MUX(0, "mout_mfc_pll_ctrl", mout_mfc_pll_ctrl_p, MUX_SEL_TOPC0, 12, 1),
-
- MUX(0, "mout_sclk_bus0_pll_cmuc", mout_sclk_bus0_pll_cmuc_p,
+ MUX(0, "mout_topc_bus0_pll", mout_topc_bus0_pll_ctrl_p,
+ MUX_SEL_TOPC0, 0, 1),
+ MUX(0, "mout_topc_bus1_pll", mout_topc_bus1_pll_ctrl_p,
+ MUX_SEL_TOPC0, 4, 1),
+ MUX(0, "mout_topc_cc_pll", mout_topc_cc_pll_ctrl_p,
+ MUX_SEL_TOPC0, 8, 1),
+ MUX(0, "mout_topc_mfc_pll", mout_topc_mfc_pll_ctrl_p,
+ MUX_SEL_TOPC0, 12, 1),
+ MUX(0, "mout_topc_bus0_pll_half", mout_topc_bus0_pll_half_p,
MUX_SEL_TOPC0, 16, 2),
- MUX(0, "mout_sclk_bus1_pll_cmuc", mout_sclk_bus1_pll_cmuc_p,
+ MUX(0, "mout_topc_bus1_pll_half", mout_topc_bus1_pll_half_p,
MUX_SEL_TOPC0, 20, 1),
- MUX(0, "mout_sclk_cc_pll_cmuc", mout_sclk_cc_pll_cmuc_p,
+ MUX(0, "mout_topc_cc_pll_half", mout_topc_cc_pll_half_p,
MUX_SEL_TOPC0, 24, 1),
- MUX(0, "mout_sclk_mfc_pll_cmuc", mout_sclk_mfc_pll_cmuc_p,
+ MUX(0, "mout_topc_mfc_pll_half", mout_topc_mfc_pll_half_p,
MUX_SEL_TOPC0, 28, 1),
- MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
+ MUX(0, "mout_topc_aud_pll", mout_topc_aud_pll_ctrl_p,
+ MUX_SEL_TOPC1, 0, 1),
+ MUX(0, "mout_topc_bus0_pll_out", mout_topc_bus0_pll_out_p,
MUX_SEL_TOPC1, 16, 1),
- MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
@@ -121,16 +127,16 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
DIV_TOPC1, 24, 4),
- DIV(DOUT_SCLK_BUS0_PLL, "dout_sclk_bus0_pll", "mout_sclk_bus0_pll_out",
- DIV_TOPC3, 0, 3),
- DIV(DOUT_SCLK_BUS1_PLL, "dout_sclk_bus1_pll", "mout_bus1_pll_ctrl",
- DIV_TOPC3, 8, 3),
- DIV(DOUT_SCLK_CC_PLL, "dout_sclk_cc_pll", "mout_cc_pll_ctrl",
- DIV_TOPC3, 12, 3),
- DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
- DIV_TOPC3, 16, 3),
- DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
- DIV_TOPC3, 28, 3),
+ DIV(DOUT_SCLK_BUS0_PLL, "dout_sclk_bus0_pll", "mout_topc_bus0_pll_out",
+ DIV_TOPC3, 0, 4),
+ DIV(DOUT_SCLK_BUS1_PLL, "dout_sclk_bus1_pll", "mout_topc_bus1_pll",
+ DIV_TOPC3, 8, 4),
+ DIV(DOUT_SCLK_CC_PLL, "dout_sclk_cc_pll", "mout_topc_cc_pll",
+ DIV_TOPC3, 12, 4),
+ DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_topc_mfc_pll",
+ DIV_TOPC3, 16, 4),
+ DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_topc_aud_pll",
+ DIV_TOPC3, 28, 4),
};
static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
@@ -139,8 +145,33 @@ static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
};
static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+ GATE(ACLK_CCORE_133, "aclk_ccore_133", "dout_aclk_ccore_133",
+ ENABLE_ACLK_TOPC0, 4, 0, 0),
+
GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
ENABLE_ACLK_TOPC1, 20, 0, 0),
+
+ GATE(ACLK_PERIS_66, "aclk_peris_66", "dout_aclk_peris_66",
+ ENABLE_ACLK_TOPC1, 24, 0, 0),
+
+ GATE(SCLK_AUD_PLL, "sclk_aud_pll", "dout_sclk_aud_pll",
+ ENABLE_SCLK_TOPC1, 20, 0, 0),
+ GATE(SCLK_MFC_PLL_B, "sclk_mfc_pll_b", "dout_sclk_mfc_pll",
+ ENABLE_SCLK_TOPC1, 17, 0, 0),
+ GATE(SCLK_MFC_PLL_A, "sclk_mfc_pll_a", "dout_sclk_mfc_pll",
+ ENABLE_SCLK_TOPC1, 16, 0, 0),
+ GATE(SCLK_BUS1_PLL_B, "sclk_bus1_pll_b", "dout_sclk_bus1_pll",
+ ENABLE_SCLK_TOPC1, 13, 0, 0),
+ GATE(SCLK_BUS1_PLL_A, "sclk_bus1_pll_a", "dout_sclk_bus1_pll",
+ ENABLE_SCLK_TOPC1, 12, 0, 0),
+ GATE(SCLK_BUS0_PLL_B, "sclk_bus0_pll_b", "dout_sclk_bus0_pll",
+ ENABLE_SCLK_TOPC1, 5, 0, 0),
+ GATE(SCLK_BUS0_PLL_A, "sclk_bus0_pll_a", "dout_sclk_bus0_pll",
+ ENABLE_SCLK_TOPC1, 4, 0, 0),
+ GATE(SCLK_CC_PLL_B, "sclk_cc_pll_b", "dout_sclk_cc_pll",
+ ENABLE_SCLK_TOPC1, 1, 0, 0),
+ GATE(SCLK_CC_PLL_A, "sclk_cc_pll_a", "dout_sclk_cc_pll",
+ ENABLE_SCLK_TOPC1, 0, 0, 0),
};
static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -193,36 +224,37 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
#define DIV_TOP0_PERIC1 0x0634
#define DIV_TOP0_PERIC2 0x0638
#define DIV_TOP0_PERIC3 0x063C
+#define ENABLE_ACLK_TOP03 0x080C
#define ENABLE_SCLK_TOP0_PERIC0 0x0A30
#define ENABLE_SCLK_TOP0_PERIC1 0x0A34
#define ENABLE_SCLK_TOP0_PERIC2 0x0A38
#define ENABLE_SCLK_TOP0_PERIC3 0x0A3C
/* List of parent clocks for Muxes in CMU_TOP0 */
-PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
-PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
-PNAME(mout_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll" };
-PNAME(mout_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll" };
-PNAME(mout_aud_pll_p) = { "fin_pll", "dout_sclk_aud_pll" };
+PNAME(mout_top0_bus0_pll_user_p) = { "fin_pll", "sclk_bus0_pll_a" };
+PNAME(mout_top0_bus1_pll_user_p) = { "fin_pll", "sclk_bus1_pll_a" };
+PNAME(mout_top0_cc_pll_user_p) = { "fin_pll", "sclk_cc_pll_a" };
+PNAME(mout_top0_mfc_pll_user_p) = { "fin_pll", "sclk_mfc_pll_a" };
+PNAME(mout_top0_aud_pll_user_p) = { "fin_pll", "sclk_aud_pll" };
-PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
+PNAME(mout_top0_bus0_pll_half_p) = {"mout_top0_bus0_pll_user",
"ffac_top0_bus0_pll_div2"};
-PNAME(mout_top0_half_bus1_pll_p) = {"mout_top0_bus1_pll",
+PNAME(mout_top0_bus1_pll_half_p) = {"mout_top0_bus1_pll_user",
"ffac_top0_bus1_pll_div2"};
-PNAME(mout_top0_half_cc_pll_p) = {"mout_top0_cc_pll",
+PNAME(mout_top0_cc_pll_half_p) = {"mout_top0_cc_pll_user",
"ffac_top0_cc_pll_div2"};
-PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
+PNAME(mout_top0_mfc_pll_half_p) = {"mout_top0_mfc_pll_user",
"ffac_top0_mfc_pll_div2"};
-PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
- "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
- "mout_top0_half_mfc_pll"};
+PNAME(mout_top0_group1) = {"mout_top0_bus0_pll_half",
+ "mout_top0_bus1_pll_half", "mout_top0_cc_pll_half",
+ "mout_top0_mfc_pll_half"};
PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
"ioclk_audiocdclk1", "ioclk_spdif_extclk",
- "mout_top0_aud_pll", "mout_top0_half_bus0_pll",
- "mout_top0_half_bus1_pll"};
-PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
- "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
+ "mout_top0_aud_pll_user", "mout_top0_bus0_pll_half",
+ "mout_top0_bus1_pll_half"};
+PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll_user",
+ "mout_top0_bus0_pll_half", "mout_top0_bus1_pll_half"};
static unsigned long top0_clk_regs[] __initdata = {
MUX_SEL_TOP00,
@@ -244,19 +276,24 @@ static unsigned long top0_clk_regs[] __initdata = {
};
static struct samsung_mux_clock top0_mux_clks[] __initdata = {
- MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
- MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
- MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
- MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
- MUX(0, "mout_top0_bus0_pll", mout_bus0_pll_p, MUX_SEL_TOP00, 16, 1),
-
- MUX(0, "mout_top0_half_mfc_pll", mout_top0_half_mfc_pll_p,
+ MUX(0, "mout_top0_aud_pll_user", mout_top0_aud_pll_user_p,
+ MUX_SEL_TOP00, 0, 1),
+ MUX(0, "mout_top0_mfc_pll_user", mout_top0_mfc_pll_user_p,
+ MUX_SEL_TOP00, 4, 1),
+ MUX(0, "mout_top0_cc_pll_user", mout_top0_cc_pll_user_p,
+ MUX_SEL_TOP00, 8, 1),
+ MUX(0, "mout_top0_bus1_pll_user", mout_top0_bus1_pll_user_p,
+ MUX_SEL_TOP00, 12, 1),
+ MUX(0, "mout_top0_bus0_pll_user", mout_top0_bus0_pll_user_p,
+ MUX_SEL_TOP00, 16, 1),
+
+ MUX(0, "mout_top0_mfc_pll_half", mout_top0_mfc_pll_half_p,
MUX_SEL_TOP01, 4, 1),
- MUX(0, "mout_top0_half_cc_pll", mout_top0_half_cc_pll_p,
+ MUX(0, "mout_top0_cc_pll_half", mout_top0_cc_pll_half_p,
MUX_SEL_TOP01, 8, 1),
- MUX(0, "mout_top0_half_bus1_pll", mout_top0_half_bus1_pll_p,
+ MUX(0, "mout_top0_bus1_pll_half", mout_top0_bus1_pll_half_p,
MUX_SEL_TOP01, 12, 1),
- MUX(0, "mout_top0_half_bus0_pll", mout_top0_half_bus0_pll_p,
+ MUX(0, "mout_top0_bus0_pll_half", mout_top0_bus0_pll_half_p,
MUX_SEL_TOP01, 16, 1),
MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
@@ -302,6 +339,11 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
};
static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+ GATE(CLK_ACLK_PERIC0_66, "aclk_peric0_66", "dout_aclk_peric0_66",
+ ENABLE_ACLK_TOP03, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_PERIC1_66, "aclk_peric1_66", "dout_aclk_peric1_66",
+ ENABLE_ACLK_TOP03, 12, CLK_SET_RATE_PARENT, 0),
+
GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
@@ -331,10 +373,12 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
};
static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
- FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll", 1, 2, 0),
- FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll", 1, 2, 0),
- FFACTOR(0, "ffac_top0_cc_pll_div2", "mout_top0_cc_pll", 1, 2, 0),
- FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll_user",
+ 1, 2, 0),
+ FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll_user",
+ 1, 2, 0),
+ FFACTOR(0, "ffac_top0_cc_pll_div2", "mout_top0_cc_pll_user", 1, 2, 0),
+ FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll_user", 1, 2, 0),
};
static struct samsung_cmu_info top0_cmu_info __initdata = {
@@ -365,31 +409,34 @@ CLK_OF_DECLARE(exynos7_clk_top0, "samsung,exynos7-clock-top0",
#define MUX_SEL_TOP13 0x020C
#define MUX_SEL_TOP1_FSYS0 0x0224
#define MUX_SEL_TOP1_FSYS1 0x0228
+#define MUX_SEL_TOP1_FSYS11 0x022C
#define DIV_TOP13 0x060C
#define DIV_TOP1_FSYS0 0x0624
#define DIV_TOP1_FSYS1 0x0628
+#define DIV_TOP1_FSYS11 0x062C
#define ENABLE_ACLK_TOP13 0x080C
#define ENABLE_SCLK_TOP1_FSYS0 0x0A24
#define ENABLE_SCLK_TOP1_FSYS1 0x0A28
+#define ENABLE_SCLK_TOP1_FSYS11 0x0A2C
/* List of parent clocks for Muxes in CMU_TOP1 */
-PNAME(mout_top1_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
-PNAME(mout_top1_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll_b" };
-PNAME(mout_top1_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll_b" };
-PNAME(mout_top1_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll_b" };
+PNAME(mout_top1_bus0_pll_user_p) = { "fin_pll", "sclk_bus0_pll_b" };
+PNAME(mout_top1_bus1_pll_user_p) = { "fin_pll", "sclk_bus1_pll_b" };
+PNAME(mout_top1_cc_pll_user_p) = { "fin_pll", "sclk_cc_pll_b" };
+PNAME(mout_top1_mfc_pll_user_p) = { "fin_pll", "sclk_mfc_pll_b" };
-PNAME(mout_top1_half_bus0_pll_p) = {"mout_top1_bus0_pll",
+PNAME(mout_top1_bus0_pll_half_p) = {"mout_top1_bus0_pll_user",
"ffac_top1_bus0_pll_div2"};
-PNAME(mout_top1_half_bus1_pll_p) = {"mout_top1_bus1_pll",
+PNAME(mout_top1_bus1_pll_half_p) = {"mout_top1_bus1_pll_user",
"ffac_top1_bus1_pll_div2"};
-PNAME(mout_top1_half_cc_pll_p) = {"mout_top1_cc_pll",
+PNAME(mout_top1_cc_pll_half_p) = {"mout_top1_cc_pll_user",
"ffac_top1_cc_pll_div2"};
-PNAME(mout_top1_half_mfc_pll_p) = {"mout_top1_mfc_pll",
+PNAME(mout_top1_mfc_pll_half_p) = {"mout_top1_mfc_pll_user",
"ffac_top1_mfc_pll_div2"};
-PNAME(mout_top1_group1) = {"mout_top1_half_bus0_pll",
- "mout_top1_half_bus1_pll", "mout_top1_half_cc_pll",
- "mout_top1_half_mfc_pll"};
+PNAME(mout_top1_group1) = {"mout_top1_bus0_pll_half",
+ "mout_top1_bus1_pll_half", "mout_top1_cc_pll_half",
+ "mout_top1_mfc_pll_half"};
static unsigned long top1_clk_regs[] __initdata = {
MUX_SEL_TOP10,
@@ -397,40 +444,54 @@ static unsigned long top1_clk_regs[] __initdata = {
MUX_SEL_TOP13,
MUX_SEL_TOP1_FSYS0,
MUX_SEL_TOP1_FSYS1,
+ MUX_SEL_TOP1_FSYS11,
DIV_TOP13,
DIV_TOP1_FSYS0,
DIV_TOP1_FSYS1,
+ DIV_TOP1_FSYS11,
ENABLE_ACLK_TOP13,
ENABLE_SCLK_TOP1_FSYS0,
ENABLE_SCLK_TOP1_FSYS1,
+ ENABLE_SCLK_TOP1_FSYS11,
};
static struct samsung_mux_clock top1_mux_clks[] __initdata = {
- MUX(0, "mout_top1_mfc_pll", mout_top1_mfc_pll_p, MUX_SEL_TOP10, 4, 1),
- MUX(0, "mout_top1_cc_pll", mout_top1_cc_pll_p, MUX_SEL_TOP10, 8, 1),
- MUX(0, "mout_top1_bus1_pll", mout_top1_bus1_pll_p,
+ MUX(0, "mout_top1_mfc_pll_user", mout_top1_mfc_pll_user_p,
+ MUX_SEL_TOP10, 4, 1),
+ MUX(0, "mout_top1_cc_pll_user", mout_top1_cc_pll_user_p,
+ MUX_SEL_TOP10, 8, 1),
+ MUX(0, "mout_top1_bus1_pll_user", mout_top1_bus1_pll_user_p,
MUX_SEL_TOP10, 12, 1),
- MUX(0, "mout_top1_bus0_pll", mout_top1_bus0_pll_p,
+ MUX(0, "mout_top1_bus0_pll_user", mout_top1_bus0_pll_user_p,
MUX_SEL_TOP10, 16, 1),
- MUX(0, "mout_top1_half_mfc_pll", mout_top1_half_mfc_pll_p,
+ MUX(0, "mout_top1_mfc_pll_half", mout_top1_mfc_pll_half_p,
MUX_SEL_TOP11, 4, 1),
- MUX(0, "mout_top1_half_cc_pll", mout_top1_half_cc_pll_p,
+ MUX(0, "mout_top1_cc_pll_half", mout_top1_cc_pll_half_p,
MUX_SEL_TOP11, 8, 1),
- MUX(0, "mout_top1_half_bus1_pll", mout_top1_half_bus1_pll_p,
+ MUX(0, "mout_top1_bus1_pll_half", mout_top1_bus1_pll_half_p,
MUX_SEL_TOP11, 12, 1),
- MUX(0, "mout_top1_half_bus0_pll", mout_top1_half_bus0_pll_p,
+ MUX(0, "mout_top1_bus0_pll_half", mout_top1_bus0_pll_half_p,
MUX_SEL_TOP11, 16, 1),
MUX(0, "mout_aclk_fsys1_200", mout_top1_group1, MUX_SEL_TOP13, 24, 2),
MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
- MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+ MUX(0, "mout_sclk_phy_fsys0_26m", mout_top1_group1,
+ MUX_SEL_TOP1_FSYS0, 0, 2),
+ MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 16, 2),
MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
MUX_SEL_TOP1_FSYS0, 28, 2),
- MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
- MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
+ MUX(0, "mout_sclk_phy_fsys1", mout_top1_group1,
+ MUX_SEL_TOP1_FSYS1, 0, 2),
+ MUX(0, "mout_sclk_ufsunipro20", mout_top1_group1,
+ MUX_SEL_TOP1_FSYS1, 16, 2),
+
+ MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS11, 0, 2),
+ MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS11, 12, 2),
+ MUX(0, "mout_sclk_phy_fsys1_26m", mout_top1_group1,
+ MUX_SEL_TOP1_FSYS11, 24, 2),
};
static struct samsung_div_clock top1_div_clks[] __initdata = {
@@ -439,34 +500,61 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200",
DIV_TOP13, 28, 4),
+ DIV(DOUT_SCLK_PHY_FSYS1, "dout_sclk_phy_fsys1",
+ "mout_sclk_phy_fsys1", DIV_TOP1_FSYS1, 0, 6),
+
+ DIV(DOUT_SCLK_UFSUNIPRO20, "dout_sclk_ufsunipro20",
+ "mout_sclk_ufsunipro20",
+ DIV_TOP1_FSYS1, 16, 6),
+
DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
- DIV_TOP1_FSYS0, 24, 4),
+ DIV_TOP1_FSYS0, 16, 10),
DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
DIV_TOP1_FSYS0, 28, 4),
DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
- DIV_TOP1_FSYS1, 24, 4),
+ DIV_TOP1_FSYS11, 0, 10),
DIV(DOUT_SCLK_MMC0, "dout_sclk_mmc0", "mout_sclk_mmc0",
- DIV_TOP1_FSYS1, 28, 4),
+ DIV_TOP1_FSYS11, 12, 10),
+
+ DIV(DOUT_SCLK_PHY_FSYS1_26M, "dout_sclk_phy_fsys1_26m",
+ "mout_sclk_phy_fsys1_26m", DIV_TOP1_FSYS11, 24, 6),
};
static struct samsung_gate_clock top1_gate_clks[] __initdata = {
GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
- ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_TOP1_FSYS0, 16, CLK_SET_RATE_PARENT, 0),
GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
+ GATE(CLK_SCLK_PHY_FSYS1, "sclk_phy_fsys1", "dout_sclk_phy_fsys1",
+ ENABLE_SCLK_TOP1_FSYS1, 0, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_SCLK_UFSUNIPRO20, "sclk_ufsunipro20", "dout_sclk_ufsunipro20",
+ ENABLE_SCLK_TOP1_FSYS1, 16, CLK_SET_RATE_PARENT, 0),
+
GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
- ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_TOP1_FSYS11, 0, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_sclk_mmc0",
- ENABLE_SCLK_TOP1_FSYS1, 28, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_TOP1_FSYS11, 12, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
+ ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
+ ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
+ "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
+ 24, CLK_SET_RATE_PARENT, 0),
};
static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
- FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll", 1, 2, 0),
- FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll", 1, 2, 0),
- FFACTOR(0, "ffac_top1_cc_pll_div2", "mout_top1_cc_pll", 1, 2, 0),
- FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll_user",
+ 1, 2, 0),
+ FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll_user",
+ 1, 2, 0),
+ FFACTOR(0, "ffac_top1_cc_pll_div2", "mout_top1_cc_pll_user", 1, 2, 0),
+ FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll_user", 1, 2, 0),
};
static struct samsung_cmu_info top1_cmu_info __initdata = {
@@ -501,7 +589,7 @@ CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1",
/*
* List of parent clocks for Muxes in CMU_CCORE
*/
-PNAME(mout_aclk_ccore_133_p) = { "fin_pll", "dout_aclk_ccore_133" };
+PNAME(mout_aclk_ccore_133_user_p) = { "fin_pll", "aclk_ccore_133" };
static unsigned long ccore_clk_regs[] __initdata = {
MUX_SEL_CCORE,
@@ -509,7 +597,7 @@ static unsigned long ccore_clk_regs[] __initdata = {
};
static struct samsung_mux_clock ccore_mux_clks[] __initdata = {
- MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_p,
+ MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_user_p,
MUX_SEL_CCORE, 1, 1),
};
@@ -542,8 +630,8 @@ CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore",
#define ENABLE_SCLK_PERIC0 0x0A00
/* List of parent clocks for Muxes in CMU_PERIC0 */
-PNAME(mout_aclk_peric0_66_p) = { "fin_pll", "dout_aclk_peric0_66" };
-PNAME(mout_sclk_uart0_p) = { "fin_pll", "sclk_uart0" };
+PNAME(mout_aclk_peric0_66_user_p) = { "fin_pll", "aclk_peric0_66" };
+PNAME(mout_sclk_uart0_user_p) = { "fin_pll", "sclk_uart0" };
static unsigned long peric0_clk_regs[] __initdata = {
MUX_SEL_PERIC0,
@@ -552,9 +640,9 @@ static unsigned long peric0_clk_regs[] __initdata = {
};
static struct samsung_mux_clock peric0_mux_clks[] __initdata = {
- MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_p,
+ MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_user_p,
MUX_SEL_PERIC0, 0, 1),
- MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_p,
+ MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_user_p,
MUX_SEL_PERIC0, 16, 1),
};
@@ -611,15 +699,15 @@ CLK_OF_DECLARE(exynos7_clk_peric0, "samsung,exynos7-clock-peric0",
exynos7_clk_peric0_init);
/* List of parent clocks for Muxes in CMU_PERIC1 */
-PNAME(mout_aclk_peric1_66_p) = { "fin_pll", "dout_aclk_peric1_66" };
-PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" };
-PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" };
-PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" };
-PNAME(mout_sclk_spi0_p) = { "fin_pll", "sclk_spi0" };
-PNAME(mout_sclk_spi1_p) = { "fin_pll", "sclk_spi1" };
-PNAME(mout_sclk_spi2_p) = { "fin_pll", "sclk_spi2" };
-PNAME(mout_sclk_spi3_p) = { "fin_pll", "sclk_spi3" };
-PNAME(mout_sclk_spi4_p) = { "fin_pll", "sclk_spi4" };
+PNAME(mout_aclk_peric1_66_user_p) = { "fin_pll", "aclk_peric1_66" };
+PNAME(mout_sclk_uart1_user_p) = { "fin_pll", "sclk_uart1" };
+PNAME(mout_sclk_uart2_user_p) = { "fin_pll", "sclk_uart2" };
+PNAME(mout_sclk_uart3_user_p) = { "fin_pll", "sclk_uart3" };
+PNAME(mout_sclk_spi0_user_p) = { "fin_pll", "sclk_spi0" };
+PNAME(mout_sclk_spi1_user_p) = { "fin_pll", "sclk_spi1" };
+PNAME(mout_sclk_spi2_user_p) = { "fin_pll", "sclk_spi2" };
+PNAME(mout_sclk_spi3_user_p) = { "fin_pll", "sclk_spi3" };
+PNAME(mout_sclk_spi4_user_p) = { "fin_pll", "sclk_spi4" };
static unsigned long peric1_clk_regs[] __initdata = {
MUX_SEL_PERIC10,
@@ -630,24 +718,24 @@ static unsigned long peric1_clk_regs[] __initdata = {
};
static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
- MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
+ MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_user_p,
MUX_SEL_PERIC10, 0, 1),
- MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
+ MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_user_p,
MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
- MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
+ MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_user_p,
MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
- MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
+ MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_user_p,
MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
- MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
+ MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_user_p,
MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
- MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
+ MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_user_p,
MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
- MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
+ MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_user_p,
MUX_SEL_PERIC11, 20, 1),
- MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
+ MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_user_p,
MUX_SEL_PERIC11, 24, 1),
- MUX(0, "mout_sclk_uart3_user", mout_sclk_uart3_p,
+ MUX(0, "mout_sclk_uart3_user", mout_sclk_uart3_user_p,
MUX_SEL_PERIC11, 28, 1),
};
@@ -735,7 +823,7 @@ CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1",
#define ENABLE_SCLK_PERIS_SECURE_CHIPID 0x0A10
/* List of parent clocks for Muxes in CMU_PERIS */
-PNAME(mout_aclk_peris_66_p) = { "fin_pll", "dout_aclk_peris_66" };
+PNAME(mout_aclk_peris_66_user_p) = { "fin_pll", "aclk_peris_66" };
static unsigned long peris_clk_regs[] __initdata = {
MUX_SEL_PERIS,
@@ -747,7 +835,7 @@ static unsigned long peris_clk_regs[] __initdata = {
static struct samsung_mux_clock peris_mux_clks[] __initdata = {
MUX(0, "mout_aclk_peris_66_user",
- mout_aclk_peris_66_p, MUX_SEL_PERIS, 0, 1),
+ mout_aclk_peris_66_user_p, MUX_SEL_PERIS, 0, 1),
};
static struct samsung_gate_clock peris_gate_clks[] __initdata = {
@@ -795,17 +883,17 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
/*
* List of parent clocks for Muxes in CMU_FSYS0
*/
-PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" };
-PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" };
+PNAME(mout_aclk_fsys0_200_user_p) = { "fin_pll", "aclk_fsys0_200" };
+PNAME(mout_sclk_mmc2_user_p) = { "fin_pll", "sclk_mmc2" };
-PNAME(mout_sclk_usbdrd300_p) = { "fin_pll", "sclk_usbdrd300" };
-PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p) = { "fin_pll",
+PNAME(mout_sclk_usbdrd300_user_p) = { "fin_pll", "sclk_usbdrd300" };
+PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_user_p) = { "fin_pll",
"phyclk_usbdrd300_udrd30_phyclock" };
-PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p) = { "fin_pll",
+PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_user_p) = { "fin_pll",
"phyclk_usbdrd300_udrd30_pipe_pclk" };
/* fixed rate clocks used in the FSYS0 block */
-struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
+static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
CLK_IS_ROOT, 60000000),
FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
@@ -824,29 +912,30 @@ static unsigned long fsys0_clk_regs[] __initdata = {
};
static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
- MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_p,
+ MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_user_p,
MUX_SEL_FSYS00, 24, 1),
- MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
- MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
+ MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_user_p,
+ MUX_SEL_FSYS01, 24, 1),
+ MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_user_p,
MUX_SEL_FSYS01, 28, 1),
MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
- mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
+ mout_phyclk_usbdrd300_udrd30_pipe_pclk_user_p,
MUX_SEL_FSYS02, 24, 1),
MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
- mout_phyclk_usbdrd300_udrd30_phyclk_p,
+ mout_phyclk_usbdrd300_udrd30_phyclk_user_p,
MUX_SEL_FSYS02, 28, 1),
};
static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
- GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
- "mout_aclk_fsys0_200_user",
- ENABLE_ACLK_FSYS00, 19, 0, 0),
GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
ENABLE_ACLK_FSYS00, 3, 0, 0),
GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
ENABLE_ACLK_FSYS00, 4, 0, 0),
+ GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
+ "mout_aclk_fsys0_200_user",
+ ENABLE_ACLK_FSYS00, 19, 0, 0),
GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
ENABLE_ACLK_FSYS01, 29, 0, 0),
@@ -874,11 +963,13 @@ static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
};
static struct samsung_cmu_info fsys0_cmu_info __initdata = {
+ .fixed_clks = fixed_rate_clks_fsys0,
+ .nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks_fsys0),
.mux_clks = fsys0_mux_clks,
.nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
.gate_clks = fsys0_gate_clks,
.nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
- .nr_clk_ids = TOP1_NR_CLK,
+ .nr_clk_ids = FSYS0_NR_CLK,
.clk_regs = fsys0_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
};
@@ -894,42 +985,122 @@ CLK_OF_DECLARE(exynos7_clk_fsys0, "samsung,exynos7-clock-fsys0",
/* Register Offset definitions for CMU_FSYS1 (0x156E0000) */
#define MUX_SEL_FSYS10 0x0200
#define MUX_SEL_FSYS11 0x0204
+#define MUX_SEL_FSYS12 0x0208
+#define DIV_FSYS1 0x0600
#define ENABLE_ACLK_FSYS1 0x0800
+#define ENABLE_PCLK_FSYS1 0x0900
+#define ENABLE_SCLK_FSYS11 0x0A04
+#define ENABLE_SCLK_FSYS12 0x0A08
+#define ENABLE_SCLK_FSYS13 0x0A0C
/*
* List of parent clocks for Muxes in CMU_FSYS1
*/
-PNAME(mout_aclk_fsys1_200_p) = { "fin_pll", "dout_aclk_fsys1_200" };
-PNAME(mout_sclk_mmc0_p) = { "fin_pll", "sclk_mmc0" };
-PNAME(mout_sclk_mmc1_p) = { "fin_pll", "sclk_mmc1" };
+PNAME(mout_aclk_fsys1_200_user_p) = { "fin_pll", "aclk_fsys1_200" };
+PNAME(mout_fsys1_group_p) = { "fin_pll", "fin_pll_26m",
+ "sclk_phy_fsys1_26m" };
+PNAME(mout_sclk_mmc0_user_p) = { "fin_pll", "sclk_mmc0" };
+PNAME(mout_sclk_mmc1_user_p) = { "fin_pll", "sclk_mmc1" };
+PNAME(mout_sclk_ufsunipro20_user_p) = { "fin_pll", "sclk_ufsunipro20" };
+PNAME(mout_phyclk_ufs20_tx0_user_p) = { "fin_pll", "phyclk_ufs20_tx0_symbol" };
+PNAME(mout_phyclk_ufs20_rx0_user_p) = { "fin_pll", "phyclk_ufs20_rx0_symbol" };
+PNAME(mout_phyclk_ufs20_rx1_user_p) = { "fin_pll", "phyclk_ufs20_rx1_symbol" };
+
+/* fixed rate clocks used in the FSYS1 block */
+static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = {
+ FRATE(PHYCLK_UFS20_TX0_SYMBOL, "phyclk_ufs20_tx0_symbol", NULL,
+ CLK_IS_ROOT, 300000000),
+ FRATE(PHYCLK_UFS20_RX0_SYMBOL, "phyclk_ufs20_rx0_symbol", NULL,
+ CLK_IS_ROOT, 300000000),
+ FRATE(PHYCLK_UFS20_RX1_SYMBOL, "phyclk_ufs20_rx1_symbol", NULL,
+ CLK_IS_ROOT, 300000000),
+};
static unsigned long fsys1_clk_regs[] __initdata = {
MUX_SEL_FSYS10,
MUX_SEL_FSYS11,
+ MUX_SEL_FSYS12,
+ DIV_FSYS1,
ENABLE_ACLK_FSYS1,
+ ENABLE_PCLK_FSYS1,
+ ENABLE_SCLK_FSYS11,
+ ENABLE_SCLK_FSYS12,
+ ENABLE_SCLK_FSYS13,
};
static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
- MUX(0, "mout_aclk_fsys1_200_user", mout_aclk_fsys1_200_p,
+ MUX(MOUT_FSYS1_PHYCLK_SEL1, "mout_fsys1_phyclk_sel1",
+ mout_fsys1_group_p, MUX_SEL_FSYS10, 16, 2),
+ MUX(0, "mout_fsys1_phyclk_sel0", mout_fsys1_group_p,
+ MUX_SEL_FSYS10, 20, 2),
+ MUX(0, "mout_aclk_fsys1_200_user", mout_aclk_fsys1_200_user_p,
MUX_SEL_FSYS10, 28, 1),
- MUX(0, "mout_sclk_mmc1_user", mout_sclk_mmc1_p, MUX_SEL_FSYS11, 24, 1),
- MUX(0, "mout_sclk_mmc0_user", mout_sclk_mmc0_p, MUX_SEL_FSYS11, 28, 1),
+ MUX(0, "mout_sclk_mmc1_user", mout_sclk_mmc1_user_p,
+ MUX_SEL_FSYS11, 24, 1),
+ MUX(0, "mout_sclk_mmc0_user", mout_sclk_mmc0_user_p,
+ MUX_SEL_FSYS11, 28, 1),
+ MUX(0, "mout_sclk_ufsunipro20_user", mout_sclk_ufsunipro20_user_p,
+ MUX_SEL_FSYS11, 20, 1),
+
+ MUX(0, "mout_phyclk_ufs20_rx1_symbol_user",
+ mout_phyclk_ufs20_rx1_user_p, MUX_SEL_FSYS12, 16, 1),
+ MUX(0, "mout_phyclk_ufs20_rx0_symbol_user",
+ mout_phyclk_ufs20_rx0_user_p, MUX_SEL_FSYS12, 24, 1),
+ MUX(0, "mout_phyclk_ufs20_tx0_symbol_user",
+ mout_phyclk_ufs20_tx0_user_p, MUX_SEL_FSYS12, 28, 1),
+};
+
+static struct samsung_div_clock fsys1_div_clks[] __initdata = {
+ DIV(DOUT_PCLK_FSYS1, "dout_pclk_fsys1", "mout_aclk_fsys1_200_user",
+ DIV_FSYS1, 0, 2),
};
static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
+ GATE(SCLK_UFSUNIPRO20_USER, "sclk_ufsunipro20_user",
+ "mout_sclk_ufsunipro20_user",
+ ENABLE_SCLK_FSYS11, 20, 0, 0),
+
GATE(ACLK_MMC1, "aclk_mmc1", "mout_aclk_fsys1_200_user",
ENABLE_ACLK_FSYS1, 29, 0, 0),
GATE(ACLK_MMC0, "aclk_mmc0", "mout_aclk_fsys1_200_user",
ENABLE_ACLK_FSYS1, 30, 0, 0),
+
+ GATE(ACLK_UFS20_LINK, "aclk_ufs20_link", "dout_pclk_fsys1",
+ ENABLE_ACLK_FSYS1, 31, 0, 0),
+ GATE(PCLK_GPIO_FSYS1, "pclk_gpio_fsys1", "mout_aclk_fsys1_200_user",
+ ENABLE_PCLK_FSYS1, 30, 0, 0),
+
+ GATE(PHYCLK_UFS20_RX1_SYMBOL_USER, "phyclk_ufs20_rx1_symbol_user",
+ "mout_phyclk_ufs20_rx1_symbol_user",
+ ENABLE_SCLK_FSYS12, 16, 0, 0),
+ GATE(PHYCLK_UFS20_RX0_SYMBOL_USER, "phyclk_ufs20_rx0_symbol_user",
+ "mout_phyclk_ufs20_rx0_symbol_user",
+ ENABLE_SCLK_FSYS12, 24, 0, 0),
+ GATE(PHYCLK_UFS20_TX0_SYMBOL_USER, "phyclk_ufs20_tx0_symbol_user",
+ "mout_phyclk_ufs20_tx0_symbol_user",
+ ENABLE_SCLK_FSYS12, 28, 0, 0),
+
+ GATE(OSCCLK_PHY_CLKOUT_EMBEDDED_COMBO_PHY,
+ "oscclk_phy_clkout_embedded_combo_phy",
+ "fin_pll",
+ ENABLE_SCLK_FSYS12, 4, CLK_IGNORE_UNUSED, 0),
+
+ GATE(SCLK_COMBO_PHY_EMBEDDED_26M, "sclk_combo_phy_embedded_26m",
+ "mout_fsys1_phyclk_sel1",
+ ENABLE_SCLK_FSYS13, 24, CLK_IGNORE_UNUSED, 0),
};
static struct samsung_cmu_info fsys1_cmu_info __initdata = {
+ .fixed_clks = fixed_rate_clks_fsys1,
+ .nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks_fsys1),
.mux_clks = fsys1_mux_clks,
.nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .div_clks = fsys1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(fsys1_div_clks),
.gate_clks = fsys1_gate_clks,
.nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
- .nr_clk_ids = TOP1_NR_CLK,
+ .nr_clk_ids = FSYS1_NR_CLK,
.clk_regs = fsys1_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
};
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index b1df7b2f1e97..4abf21172625 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -214,7 +214,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
break;
if (clkidx >= MSTP_MAX_CLOCKS) {
- pr_err("%s: invalid clock %s %s index %u)\n",
+ pr_err("%s: invalid clock %s %s index %u\n",
__func__, np->name, name, clkidx);
continue;
}
diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/shmobile/clk-r8a7778.c
index 87c1d2f2fb57..b1741551fff2 100644
--- a/drivers/clk/shmobile/clk-r8a7778.c
+++ b/drivers/clk/shmobile/clk-r8a7778.c
@@ -20,10 +20,10 @@ struct r8a7778_cpg {
};
/* PLL multipliers per bits 11, 12, and 18 of MODEMR */
-struct {
+static const struct {
unsigned long plla_mult;
unsigned long pllb_mult;
-} r8a7778_rates[] __initdata = {
+} r8a7778_rates[] __initconst = {
[0] = { 21, 21 },
[1] = { 24, 24 },
[2] = { 28, 28 },
@@ -34,10 +34,10 @@ struct {
};
/* Clock dividers per bits 1 and 2 of MODEMR */
-struct {
+static const struct {
const char *name;
unsigned int div[4];
-} r8a7778_divs[6] __initdata = {
+} r8a7778_divs[6] __initconst = {
{ "b", { 12, 12, 16, 18 } },
{ "out", { 12, 12, 16, 18 } },
{ "p", { 16, 12, 16, 12 } },
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index a98e21fe773a..957aae63e7cc 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -205,18 +205,11 @@
#define SIRFSOC_CLKC_LEAF_CLK_EN7_SET 0x0530
#define SIRFSOC_CLKC_LEAF_CLK_EN8_SET 0x0548
-
-static void __iomem *sirfsoc_clk_vbase;
-static struct clk_onecell_data clk_data;
-
-static const struct clk_div_table pll_div_table[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 8 },
- { .val = 4, .div = 16 },
- { .val = 5, .div = 32 },
-};
+#define SIRFSOC_NOC_CLK_IDLEREQ_SET 0x02D0
+#define SIRFSOC_NOC_CLK_IDLEREQ_CLR 0x02D4
+#define SIRFSOC_NOC_CLK_SLVRDY_SET 0x02E8
+#define SIRFSOC_NOC_CLK_SLVRDY_CLR 0x02EC
+#define SIRFSOC_NOC_CLK_IDLE_STATUS 0x02F4
struct clk_pll {
struct clk_hw hw;
@@ -231,10 +224,18 @@ struct clk_dto {
};
#define to_dtoclk(_hw) container_of(_hw, struct clk_dto, hw)
+enum clk_unit_type {
+ CLK_UNIT_NOC_OTHER,
+ CLK_UNIT_NOC_CLOCK,
+ CLK_UNIT_NOC_SOCKET,
+};
+
struct clk_unit {
struct clk_hw hw;
u16 regofs;
u16 bit;
+ u32 type;
+ u8 idle_bit;
spinlock_t *lock;
};
#define to_unitclk(_hw) container_of(_hw, struct clk_unit, hw)
@@ -272,6 +273,8 @@ struct atlas7_unit_init_data {
unsigned long flags;
u32 regofs;
u8 bit;
+ u32 type;
+ u8 idle_bit;
spinlock_t *lock;
};
@@ -284,6 +287,18 @@ struct atlas7_reset_desc {
spinlock_t *lock;
};
+static void __iomem *sirfsoc_clk_vbase;
+static struct clk_onecell_data clk_data;
+
+static const struct clk_div_table pll_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { .val = 4, .div = 16 },
+ { .val = 5, .div = 32 },
+};
+
static DEFINE_SPINLOCK(cpupll_ctrl1_lock);
static DEFINE_SPINLOCK(mempll_ctrl1_lock);
static DEFINE_SPINLOCK(sys0pll_ctrl1_lock);
@@ -1040,148 +1055,148 @@ static struct atlas7_mux_init_data mux_list[] __initdata = {
/* new unit should add start from the tail of list */
static struct atlas7_unit_init_data unit_list[] __initdata = {
/* unit_name, parent_name, flags, regofs, bit, lock */
- { 0, "audmscm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 0, &root0_gate_lock },
- { 1, "gnssm_gnss", "gnss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 1, &root0_gate_lock },
- { 2, "gpum_gpu", "gpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 2, &root0_gate_lock },
- { 3, "mediam_g2d", "g2d_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 3, &root0_gate_lock },
- { 4, "mediam_jpenc", "jpenc_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 4, &root0_gate_lock },
- { 5, "vdifm_disp0", "disp0_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 5, &root0_gate_lock },
- { 6, "vdifm_disp1", "disp1_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 6, &root0_gate_lock },
- { 7, "audmscm_i2s", "i2s_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 8, &root0_gate_lock },
- { 8, "audmscm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 11, &root0_gate_lock },
- { 9, "vdifm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 12, &root0_gate_lock },
- { 10, "gnssm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 13, &root0_gate_lock },
- { 11, "mediam_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 14, &root0_gate_lock },
- { 12, "btm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 17, &root0_gate_lock },
- { 13, "mediam_sdphy01", "sdphy01_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 18, &root0_gate_lock },
- { 14, "vdifm_sdphy23", "sdphy23_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 19, &root0_gate_lock },
- { 15, "vdifm_sdphy45", "sdphy45_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 20, &root0_gate_lock },
- { 16, "vdifm_sdphy67", "sdphy67_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 21, &root0_gate_lock },
- { 17, "audmscm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 22, &root0_gate_lock },
- { 18, "mediam_nand", "nand_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 27, &root0_gate_lock },
- { 19, "gnssm_sec", "sec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 28, &root0_gate_lock },
- { 20, "cpum_cpu", "cpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 29, &root0_gate_lock },
- { 21, "gnssm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 30, &root0_gate_lock },
- { 22, "vdifm_vip", "vip_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 31, &root0_gate_lock },
- { 23, "btm_btss", "btss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 0, &root1_gate_lock },
- { 24, "mediam_usbphy", "usbphy_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 1, &root1_gate_lock },
- { 25, "rtcm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 2, &root1_gate_lock },
- { 26, "audmscm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 3, &root1_gate_lock },
- { 27, "vdifm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 4, &root1_gate_lock },
- { 28, "gnssm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 5, &root1_gate_lock },
- { 29, "mediam_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 6, &root1_gate_lock },
- { 30, "cpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 8, &root1_gate_lock },
- { 31, "gpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 9, &root1_gate_lock },
- { 32, "audmscm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 11, &root1_gate_lock },
- { 33, "vdifm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 12, &root1_gate_lock },
- { 34, "gnssm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 13, &root1_gate_lock },
- { 35, "mediam_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 14, &root1_gate_lock },
- { 36, "ddrm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 15, &root1_gate_lock },
- { 37, "cpum_tpiu", "tpiu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 16, &root1_gate_lock },
- { 38, "gpum_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 17, &root1_gate_lock },
- { 39, "gnssm_rgmii", "rgmii_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 20, &root1_gate_lock },
- { 40, "mediam_vdec", "vdec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 21, &root1_gate_lock },
- { 41, "gpum_sdr", "sdr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 22, &root1_gate_lock },
- { 42, "vdifm_deint", "deint_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 23, &root1_gate_lock },
- { 43, "gnssm_can", "can_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 26, &root1_gate_lock },
- { 44, "mediam_usb", "usb_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 28, &root1_gate_lock },
- { 45, "gnssm_gmac", "gmac_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 29, &root1_gate_lock },
- { 46, "cvd_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 0, &leaf1_gate_lock },
- { 47, "timer_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 1, &leaf1_gate_lock },
- { 48, "pulse_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 2, &leaf1_gate_lock },
- { 49, "tsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 3, &leaf1_gate_lock },
- { 50, "tsc_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 21, &leaf1_gate_lock },
- { 51, "ioctop_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 4, &leaf1_gate_lock },
- { 52, "rsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 5, &leaf1_gate_lock },
- { 53, "dvm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 6, &leaf1_gate_lock },
- { 54, "lvds_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 7, &leaf1_gate_lock },
- { 55, "kas_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 8, &leaf1_gate_lock },
- { 56, "ac97_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 9, &leaf1_gate_lock },
- { 57, "usp0_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 10, &leaf1_gate_lock },
- { 58, "usp1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 11, &leaf1_gate_lock },
- { 59, "usp2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 12, &leaf1_gate_lock },
- { 60, "dmac2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 13, &leaf1_gate_lock },
- { 61, "dmac3_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 14, &leaf1_gate_lock },
- { 62, "audioif_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 15, &leaf1_gate_lock },
- { 63, "i2s1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 17, &leaf1_gate_lock },
- { 64, "thaudmscm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 22, &leaf1_gate_lock },
- { 65, "analogtest_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 23, &leaf1_gate_lock },
- { 66, "sys2pci_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 0, &leaf2_gate_lock },
- { 67, "pciarb_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 1, &leaf2_gate_lock },
- { 68, "pcicopy_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 2, &leaf2_gate_lock },
- { 69, "rom_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 3, &leaf2_gate_lock },
- { 70, "sdio23_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 4, &leaf2_gate_lock },
- { 71, "sdio45_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 5, &leaf2_gate_lock },
- { 72, "sdio67_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 6, &leaf2_gate_lock },
- { 73, "vip1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 7, &leaf2_gate_lock },
- { 74, "vip1_vip", "vdifm_vip", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 16, &leaf2_gate_lock },
- { 75, "sdio23_sdphy23", "vdifm_sdphy23", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 8, &leaf2_gate_lock },
- { 76, "sdio45_sdphy45", "vdifm_sdphy45", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 9, &leaf2_gate_lock },
- { 77, "sdio67_sdphy67", "vdifm_sdphy67", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 10, &leaf2_gate_lock },
- { 78, "vpp0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 11, &leaf2_gate_lock },
- { 79, "lcd0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 12, &leaf2_gate_lock },
- { 80, "vpp1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 13, &leaf2_gate_lock },
- { 81, "lcd1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 14, &leaf2_gate_lock },
- { 82, "dcu_deint", "vdifm_deint", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 15, &leaf2_gate_lock },
- { 83, "vdifm_dapa_r_nocr", "vdifm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 17, &leaf2_gate_lock },
- { 84, "gpio1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 18, &leaf2_gate_lock },
- { 85, "thvdifm_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 19, &leaf2_gate_lock },
- { 86, "gmac_rgmii", "gnssm_rgmii", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 0, &leaf3_gate_lock },
- { 87, "gmac_gmac", "gnssm_gmac", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 1, &leaf3_gate_lock },
- { 88, "uart1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 2, &leaf3_gate_lock },
- { 89, "dmac0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 3, &leaf3_gate_lock },
- { 90, "uart0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 4, &leaf3_gate_lock },
- { 91, "uart2_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 5, &leaf3_gate_lock },
- { 92, "uart3_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 6, &leaf3_gate_lock },
- { 93, "uart4_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 7, &leaf3_gate_lock },
- { 94, "uart5_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 8, &leaf3_gate_lock },
- { 95, "spi1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 9, &leaf3_gate_lock },
- { 96, "gnss_gnss", "gnssm_gnss", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 10, &leaf3_gate_lock },
- { 97, "canbus1_can", "gnssm_can", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 12, &leaf3_gate_lock },
- { 98, "ccsec_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 15, &leaf3_gate_lock },
- { 99, "ccpub_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 16, &leaf3_gate_lock },
- { 100, "gnssm_dapa_r_nocr", "gnssm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 13, &leaf3_gate_lock },
- { 101, "thgnssm_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 14, &leaf3_gate_lock },
- { 102, "media_vdec", "mediam_vdec", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 0, &leaf4_gate_lock },
- { 103, "media_jpenc", "mediam_jpenc", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 1, &leaf4_gate_lock },
- { 104, "g2d_g2d", "mediam_g2d", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 2, &leaf4_gate_lock },
- { 105, "i2c0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 3, &leaf4_gate_lock },
- { 106, "i2c1_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 4, &leaf4_gate_lock },
- { 107, "gpio0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 5, &leaf4_gate_lock },
- { 108, "nand_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 6, &leaf4_gate_lock },
- { 109, "sdio01_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 7, &leaf4_gate_lock },
- { 110, "sys2pci2_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 8, &leaf4_gate_lock },
- { 111, "sdio01_sdphy01", "mediam_sdphy01", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 9, &leaf4_gate_lock },
- { 112, "nand_nand", "mediam_nand", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 10, &leaf4_gate_lock },
- { 113, "usb0_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 11, &leaf4_gate_lock },
- { 114, "usb1_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 12, &leaf4_gate_lock },
- { 115, "usbphy0_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 13, &leaf4_gate_lock },
- { 116, "usbphy1_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 14, &leaf4_gate_lock },
- { 117, "thmediam_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 15, &leaf4_gate_lock },
- { 118, "memc_mem", "mempll_clk1", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 0, &leaf5_gate_lock },
- { 119, "dapa_mem", "mempll_clk1", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 1, &leaf5_gate_lock },
- { 120, "nocddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 2, &leaf5_gate_lock },
- { 121, "thddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 3, &leaf5_gate_lock },
- { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, &leaf6_gate_lock },
- { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, &leaf6_gate_lock },
- { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, &leaf6_gate_lock },
- { 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock },
- { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, &leaf7_gate_lock },
- { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, &leaf7_gate_lock },
- { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, &leaf7_gate_lock },
- { 129, "a7ca_btss", "btm_btss", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 1, &leaf8_gate_lock },
- { 130, "dmac4_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 2, &leaf8_gate_lock },
- { 131, "uart6_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 3, &leaf8_gate_lock },
- { 132, "usp3_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 4, &leaf8_gate_lock },
- { 133, "a7ca_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 5, &leaf8_gate_lock },
- { 134, "noc_btm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 6, &leaf8_gate_lock },
- { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, &leaf8_gate_lock },
- { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, &root1_gate_lock },
- { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, &leaf8_gate_lock },
- { 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, &leaf0_gate_lock },
- { 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, &leaf0_gate_lock },
- { 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, &leaf0_gate_lock },
- { 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, &leaf0_gate_lock },
+ { 0, "audmscm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 0, 0, 0, &root0_gate_lock },
+ { 1, "gnssm_gnss", "gnss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 1, 0, 0, &root0_gate_lock },
+ { 2, "gpum_gpu", "gpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 2, 0, 0, &root0_gate_lock },
+ { 3, "mediam_g2d", "g2d_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 3, 0, 0, &root0_gate_lock },
+ { 4, "mediam_jpenc", "jpenc_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 4, 0, 0, &root0_gate_lock },
+ { 5, "vdifm_disp0", "disp0_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 5, 0, 0, &root0_gate_lock },
+ { 6, "vdifm_disp1", "disp1_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 6, 0, 0, &root0_gate_lock },
+ { 7, "audmscm_i2s", "i2s_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 8, 0, 0, &root0_gate_lock },
+ { 8, "audmscm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 11, 0, 0, &root0_gate_lock },
+ { 9, "vdifm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 12, 0, 0, &root0_gate_lock },
+ { 10, "gnssm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 13, 0, 0, &root0_gate_lock },
+ { 11, "mediam_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 14, 0, 0, &root0_gate_lock },
+ { 12, "btm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 17, 0, 0, &root0_gate_lock },
+ { 13, "mediam_sdphy01", "sdphy01_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 18, 0, 0, &root0_gate_lock },
+ { 14, "vdifm_sdphy23", "sdphy23_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 19, 0, 0, &root0_gate_lock },
+ { 15, "vdifm_sdphy45", "sdphy45_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 20, 0, 0, &root0_gate_lock },
+ { 16, "vdifm_sdphy67", "sdphy67_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 21, 0, 0, &root0_gate_lock },
+ { 17, "audmscm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 22, 0, 0, &root0_gate_lock },
+ { 18, "mediam_nand", "nand_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 27, 0, 0, &root0_gate_lock },
+ { 19, "gnssm_sec", "sec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 28, 0, 0, &root0_gate_lock },
+ { 20, "cpum_cpu", "cpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 29, 0, 0, &root0_gate_lock },
+ { 21, "gnssm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 30, 0, 0, &root0_gate_lock },
+ { 22, "vdifm_vip", "vip_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 31, 0, 0, &root0_gate_lock },
+ { 23, "btm_btss", "btss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 0, 0, 0, &root1_gate_lock },
+ { 24, "mediam_usbphy", "usbphy_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 1, 0, 0, &root1_gate_lock },
+ { 25, "rtcm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 2, 0, 0, &root1_gate_lock },
+ { 26, "audmscm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 3, 0, 0, &root1_gate_lock },
+ { 27, "vdifm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 4, 0, 0, &root1_gate_lock },
+ { 28, "gnssm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 5, 0, 0, &root1_gate_lock },
+ { 29, "mediam_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 6, 0, 0, &root1_gate_lock },
+ { 30, "cpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 8, 0, 0, &root1_gate_lock },
+ { 31, "gpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 9, 0, 0, &root1_gate_lock },
+ { 32, "audmscm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 11, 0, 0, &root1_gate_lock },
+ { 33, "vdifm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 12, 0, 0, &root1_gate_lock },
+ { 34, "gnssm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 13, 0, 0, &root1_gate_lock },
+ { 35, "mediam_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 14, 0, 0, &root1_gate_lock },
+ { 36, "ddrm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 15, 0, 0, &root1_gate_lock },
+ { 37, "cpum_tpiu", "tpiu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 16, 0, 0, &root1_gate_lock },
+ { 38, "gpum_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 17, 0, 0, &root1_gate_lock },
+ { 39, "gnssm_rgmii", "rgmii_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 20, 0, 0, &root1_gate_lock },
+ { 40, "mediam_vdec", "vdec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 21, 0, 0, &root1_gate_lock },
+ { 41, "gpum_sdr", "sdr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 22, 0, 0, &root1_gate_lock },
+ { 42, "vdifm_deint", "deint_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 23, 0, 0, &root1_gate_lock },
+ { 43, "gnssm_can", "can_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 26, 0, 0, &root1_gate_lock },
+ { 44, "mediam_usb", "usb_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 28, 0, 0, &root1_gate_lock },
+ { 45, "gnssm_gmac", "gmac_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 29, 0, 0, &root1_gate_lock },
+ { 46, "cvd_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 0, CLK_UNIT_NOC_CLOCK, 4, &leaf1_gate_lock },
+ { 47, "timer_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 1, 0, 0, &leaf1_gate_lock },
+ { 48, "pulse_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 2, 0, 0, &leaf1_gate_lock },
+ { 49, "tsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 3, 0, 0, &leaf1_gate_lock },
+ { 50, "tsc_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 21, 0, 0, &leaf1_gate_lock },
+ { 51, "ioctop_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 4, 0, 0, &leaf1_gate_lock },
+ { 52, "rsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 5, 0, 0, &leaf1_gate_lock },
+ { 53, "dvm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 6, CLK_UNIT_NOC_SOCKET, 7, &leaf1_gate_lock },
+ { 54, "lvds_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 7, CLK_UNIT_NOC_SOCKET, 8, &leaf1_gate_lock },
+ { 55, "kas_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 8, CLK_UNIT_NOC_CLOCK, 2, &leaf1_gate_lock },
+ { 56, "ac97_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 9, 0, 0, &leaf1_gate_lock },
+ { 57, "usp0_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 10, CLK_UNIT_NOC_SOCKET, 4, &leaf1_gate_lock },
+ { 58, "usp1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 11, CLK_UNIT_NOC_SOCKET, 5, &leaf1_gate_lock },
+ { 59, "usp2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 12, CLK_UNIT_NOC_SOCKET, 6, &leaf1_gate_lock },
+ { 60, "dmac2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 13, CLK_UNIT_NOC_SOCKET, 1, &leaf1_gate_lock },
+ { 61, "dmac3_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 14, CLK_UNIT_NOC_SOCKET, 2, &leaf1_gate_lock },
+ { 62, "audioif_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 15, CLK_UNIT_NOC_SOCKET, 0, &leaf1_gate_lock },
+ { 63, "i2s1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 17, CLK_UNIT_NOC_CLOCK, 2, &leaf1_gate_lock },
+ { 64, "thaudmscm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 22, 0, 0, &leaf1_gate_lock },
+ { 65, "analogtest_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 23, 0, 0, &leaf1_gate_lock },
+ { 66, "sys2pci_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 0, CLK_UNIT_NOC_CLOCK, 20, &leaf2_gate_lock },
+ { 67, "pciarb_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 1, 0, 0, &leaf2_gate_lock },
+ { 68, "pcicopy_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 2, 0, 0, &leaf2_gate_lock },
+ { 69, "rom_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 3, 0, 0, &leaf2_gate_lock },
+ { 70, "sdio23_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 4, 0, 0, &leaf2_gate_lock },
+ { 71, "sdio45_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 5, 0, 0, &leaf2_gate_lock },
+ { 72, "sdio67_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 6, 0, 0, &leaf2_gate_lock },
+ { 73, "vip1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 7, 0, 0, &leaf2_gate_lock },
+ { 74, "vip1_vip", "vdifm_vip", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 16, CLK_UNIT_NOC_CLOCK, 21, &leaf2_gate_lock },
+ { 75, "sdio23_sdphy23", "vdifm_sdphy23", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 8, 0, 0, &leaf2_gate_lock },
+ { 76, "sdio45_sdphy45", "vdifm_sdphy45", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 9, 0, 0, &leaf2_gate_lock },
+ { 77, "sdio67_sdphy67", "vdifm_sdphy67", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 10, 0, 0, &leaf2_gate_lock },
+ { 78, "vpp0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 11, CLK_UNIT_NOC_CLOCK, 22, &leaf2_gate_lock },
+ { 79, "lcd0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 12, CLK_UNIT_NOC_CLOCK, 18, &leaf2_gate_lock },
+ { 80, "vpp1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 13, CLK_UNIT_NOC_CLOCK, 23, &leaf2_gate_lock },
+ { 81, "lcd1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 14, CLK_UNIT_NOC_CLOCK, 19, &leaf2_gate_lock },
+ { 82, "dcu_deint", "vdifm_deint", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 15, CLK_UNIT_NOC_CLOCK, 17, &leaf2_gate_lock },
+ { 83, "vdifm_dapa_r_nocr", "vdifm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 17, 0, 0, &leaf2_gate_lock },
+ { 84, "gpio1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 18, 0, 0, &leaf2_gate_lock },
+ { 85, "thvdifm_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 19, 0, 0, &leaf2_gate_lock },
+ { 86, "gmac_rgmii", "gnssm_rgmii", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 0, 0, 0, &leaf3_gate_lock },
+ { 87, "gmac_gmac", "gnssm_gmac", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 1, CLK_UNIT_NOC_CLOCK, 10, &leaf3_gate_lock },
+ { 88, "uart1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 2, CLK_UNIT_NOC_SOCKET, 14, &leaf3_gate_lock },
+ { 89, "dmac0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 3, CLK_UNIT_NOC_SOCKET, 11, &leaf3_gate_lock },
+ { 90, "uart0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 4, CLK_UNIT_NOC_SOCKET, 13, &leaf3_gate_lock },
+ { 91, "uart2_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 5, CLK_UNIT_NOC_SOCKET, 15, &leaf3_gate_lock },
+ { 92, "uart3_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 6, CLK_UNIT_NOC_SOCKET, 16, &leaf3_gate_lock },
+ { 93, "uart4_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 7, CLK_UNIT_NOC_SOCKET, 17, &leaf3_gate_lock },
+ { 94, "uart5_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 8, CLK_UNIT_NOC_SOCKET, 18, &leaf3_gate_lock },
+ { 95, "spi1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 9, CLK_UNIT_NOC_SOCKET, 12, &leaf3_gate_lock },
+ { 96, "gnss_gnss", "gnssm_gnss", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 10, 0, 0, &leaf3_gate_lock },
+ { 97, "canbus1_can", "gnssm_can", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 12, CLK_UNIT_NOC_CLOCK, 7, &leaf3_gate_lock },
+ { 98, "ccsec_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 15, CLK_UNIT_NOC_CLOCK, 9, &leaf3_gate_lock },
+ { 99, "ccpub_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 16, CLK_UNIT_NOC_CLOCK, 8, &leaf3_gate_lock },
+ { 100, "gnssm_dapa_r_nocr", "gnssm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 13, 0, 0, &leaf3_gate_lock },
+ { 101, "thgnssm_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 14, 0, 0, &leaf3_gate_lock },
+ { 102, "media_vdec", "mediam_vdec", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 0, CLK_UNIT_NOC_CLOCK, 3, &leaf4_gate_lock },
+ { 103, "media_jpenc", "mediam_jpenc", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 1, CLK_UNIT_NOC_CLOCK, 1, &leaf4_gate_lock },
+ { 104, "g2d_g2d", "mediam_g2d", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 2, CLK_UNIT_NOC_CLOCK, 12, &leaf4_gate_lock },
+ { 105, "i2c0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 3, CLK_UNIT_NOC_SOCKET, 21, &leaf4_gate_lock },
+ { 106, "i2c1_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 4, CLK_UNIT_NOC_SOCKET, 20, &leaf4_gate_lock },
+ { 107, "gpio0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 5, CLK_UNIT_NOC_SOCKET, 19, &leaf4_gate_lock },
+ { 108, "nand_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 6, 0, 0, &leaf4_gate_lock },
+ { 109, "sdio01_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 7, 0, 0, &leaf4_gate_lock },
+ { 110, "sys2pci2_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 8, CLK_UNIT_NOC_CLOCK, 13, &leaf4_gate_lock },
+ { 111, "sdio01_sdphy01", "mediam_sdphy01", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 9, 0, 0, &leaf4_gate_lock },
+ { 112, "nand_nand", "mediam_nand", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 10, CLK_UNIT_NOC_CLOCK, 14, &leaf4_gate_lock },
+ { 113, "usb0_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 11, CLK_UNIT_NOC_CLOCK, 15, &leaf4_gate_lock },
+ { 114, "usb1_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 12, CLK_UNIT_NOC_CLOCK, 16, &leaf4_gate_lock },
+ { 115, "usbphy0_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 13, 0, 0, &leaf4_gate_lock },
+ { 116, "usbphy1_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 14, 0, 0, &leaf4_gate_lock },
+ { 117, "thmediam_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 15, 0, 0, &leaf4_gate_lock },
+ { 118, "memc_mem", "mempll_clk1", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 0, 0, 0, &leaf5_gate_lock },
+ { 119, "dapa_mem", "mempll_clk1", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 1, 0, 0, &leaf5_gate_lock },
+ { 120, "nocddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 2, 0, 0, &leaf5_gate_lock },
+ { 121, "thddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 3, 0, 0, &leaf5_gate_lock },
+ { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, CLK_UNIT_NOC_SOCKET, 9, &leaf6_gate_lock },
+ { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, CLK_UNIT_NOC_SOCKET, 10, &leaf6_gate_lock },
+ { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, 0, 0, &leaf6_gate_lock },
+ { 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, 0, 0, &leaf6_gate_lock },
+ { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, CLK_UNIT_NOC_CLOCK, 0, &leaf7_gate_lock },
+ { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, CLK_UNIT_NOC_CLOCK, 11, &leaf7_gate_lock },
+ { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, 0, 0, &leaf7_gate_lock },
+ { 129, "a7ca_btss", "btm_btss", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 1, 0, 0, &leaf8_gate_lock },
+ { 130, "dmac4_io", "a7ca_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 2, 0, 0, &leaf8_gate_lock },
+ { 131, "uart6_io", "dmac4_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 3, 0, 0, &leaf8_gate_lock },
+ { 132, "usp3_io", "dmac4_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 4, 0, 0, &leaf8_gate_lock },
+ { 133, "a7ca_io", "noc_btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 5, 0, 0, &leaf8_gate_lock },
+ { 134, "noc_btm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 6, 0, 0, &leaf8_gate_lock },
+ { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, 0, 0, &leaf8_gate_lock },
+ { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, 0, 0, &root1_gate_lock },
+ { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, 0, 0, &leaf8_gate_lock },
+ { 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, 0, 0, &leaf0_gate_lock },
+ { 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, 0, 0, &leaf0_gate_lock },
+ { 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, 0, 0, &leaf0_gate_lock },
+ { 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, 0, 0, &leaf0_gate_lock },
};
static struct clk *atlas7_clks[ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list)];
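Editor's note: each unit_list row above packs everything atlas7_unit_clk_register() needs, and the two new columns (type and idle_bit) drive the NoC handshake added below. A rough sketch of one table entry, with an illustrative struct name and comments (the real definition lives earlier in clk-atlas7.c and is not part of this hunk):

struct atlas7_unit_init_data {		/* hypothetical name, for illustration only */
	u32		index;		/* slot in atlas7_clks[] */
	const char	*unit_name;
	const char	*parent_name;
	unsigned long	flags;		/* e.g. CLK_IGNORE_UNUSED */
	u32		regofs;		/* *_CLK_ENx_SET register offset */
	u8		bit;		/* enable bit within that register */
	u32		type;		/* 0, CLK_UNIT_NOC_CLOCK or CLK_UNIT_NOC_SOCKET */
	u8		idle_bit;	/* bit in the NoC idle-request/slave-ready registers */
	spinlock_t	*lock;
};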
@@ -1206,20 +1221,44 @@ static int unit_clk_enable(struct clk_hw *hw)
spin_lock_irqsave(clk->lock, flags);
clkc_writel(BIT(clk->bit), reg);
+ if (clk->type == CLK_UNIT_NOC_CLOCK)
+ clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
+ else if (clk->type == CLK_UNIT_NOC_SOCKET)
+ clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_SLVRDY_SET);
+
spin_unlock_irqrestore(clk->lock, flags);
return 0;
}
static void unit_clk_disable(struct clk_hw *hw)
{
- u32 reg;
+ u32 reg;
+ u32 i = 0;
struct clk_unit *clk = to_unitclk(hw);
unsigned long flags;
reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_CLR - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
-
spin_lock_irqsave(clk->lock, flags);
+ if (clk->type == CLK_UNIT_NOC_CLOCK) {
+ clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_SET);
+ while (!(clkc_readl(SIRFSOC_NOC_CLK_IDLE_STATUS) &
+ BIT(clk->idle_bit)) && (i++ < 100)) {
+ cpu_relax();
+ udelay(10);
+ }
+
+ if (i == 100) {
+ pr_err("unit NoC Clock disconnect Error:timeout\n");
+ /*once timeout, undo idlereq by CLR*/
+ clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
+ goto err;
+ }
+
+ } else if (clk->type == CLK_UNIT_NOC_SOCKET)
+ clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_SLVRDY_CLR);
+
clkc_writel(BIT(clk->bit), reg);
+err:
spin_unlock_irqrestore(clk->lock, flags);
}
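Editor's note: the enable/disable paths above implement a NoC handshake. Enabling a unit clears the idle request (or marks the socket slave-ready); disabling first asserts the idle request and polls the idle status for up to about 1 ms before gating the clock, and withdraws the request on timeout so the interconnect is not left half-disconnected. A minimal sketch of the disable-side handshake, assuming the clkc_readl()/clkc_writel() accessors and register names used above (the helper name is illustrative):

static int noc_clock_request_idle(struct clk_unit *clk)
{
	int i;

	/* ask the NoC fabric to idle this unit's connection */
	clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_SET);

	for (i = 0; i < 100; i++) {
		if (clkc_readl(SIRFSOC_NOC_CLK_IDLE_STATUS) & BIT(clk->idle_bit))
			return 0;	/* acknowledged: safe to gate the clock */
		udelay(10);
	}

	/* timed out: withdraw the request before giving up */
	clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
	return -ETIMEDOUT;
}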
@@ -1232,7 +1271,7 @@ static const struct clk_ops unit_clk_ops = {
static struct clk * __init
atlas7_unit_clk_register(struct device *dev, const char *name,
const char * const parent_name, unsigned long flags,
- u32 regofs, u8 bit, spinlock_t *lock)
+ u32 regofs, u8 bit, u32 type, u8 idle_bit, spinlock_t *lock)
{
struct clk *clk;
struct clk_unit *unit;
@@ -1251,6 +1290,9 @@ atlas7_unit_clk_register(struct device *dev, const char *name,
unit->hw.init = &init;
unit->regofs = regofs;
unit->bit = bit;
+
+ unit->type = type;
+ unit->idle_bit = idle_bit;
unit->lock = lock;
clk = clk_register(dev, &unit->hw);
@@ -1624,7 +1666,7 @@ static void __init atlas7_clk_init(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(unit_list); i++) {
unit = &unit_list[i];
atlas7_clks[i] = atlas7_unit_clk_register(NULL, unit->unit_name, unit->parent_name,
- unit->flags, unit->regofs, unit->bit, unit->lock);
+ unit->flags, unit->regofs, unit->bit, unit->type, unit->idle_bit, unit->lock);
BUG_ON(!atlas7_clks[i]);
}
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index bd355ee33766..24d99594c0b3 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -268,6 +268,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
int num_parents, i;
spinlock_t *rlock = NULL;
unsigned long flex_flags = 0;
+ int ret;
pnode = of_get_parent(np);
if (!pnode)
@@ -285,13 +286,13 @@ static void __init st_of_flexgen_setup(struct device_node *np)
if (!clk_data)
goto err;
- clk_data->clk_num = of_property_count_strings(np ,
- "clock-output-names");
- if (clk_data->clk_num <= 0) {
+ ret = of_property_count_strings(np, "clock-output-names");
+ if (ret <= 0) {
pr_err("%s: Failed to get number of output clocks (%d)",
__func__, clk_data->clk_num);
goto err;
}
+ clk_data->clk_num = ret;
clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
GFP_KERNEL);
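Editor's note: the point of this flexgen change is that clk_num in struct clk_onecell_data is unsigned, so the old "clk_data->clk_num <= 0" test could never catch the negative errno that of_property_count_strings() returns on failure. Capturing the result in a signed local first makes the check meaningful, roughly:

int ret = of_property_count_strings(np, "clock-output-names");

if (ret <= 0)			/* zero names, or a negative errno */
	goto err;
clk_data->clk_num = ret;	/* safe: ret is known to be positive here */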
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 4f7f6c00b219..5dc5ce217960 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -17,6 +17,7 @@
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include "clkgen.h"
static DEFINE_SPINLOCK(clkgena_divmux_lock);
static DEFINE_SPINLOCK(clkgenf_lock);
@@ -576,6 +577,7 @@ static struct clkgen_mux_data stih415_a9_mux_data = {
.offset = 0,
.shift = 1,
.width = 2,
+ .lock = &clkgen_a9_lock,
};
static struct clkgen_mux_data stih416_a9_mux_data = {
.offset = 0,
@@ -586,6 +588,7 @@ static struct clkgen_mux_data stih407_a9_mux_data = {
.offset = 0x1a4,
.shift = 0,
.width = 2,
+ .lock = &clkgen_a9_lock,
};
static const struct of_device_id mux_of_match[] = {
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index b2a332cf8985..38f6f3a9098e 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -18,10 +18,12 @@
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
#include "clkgen.h"
static DEFINE_SPINLOCK(clkgena_c32_odf_lock);
+DEFINE_SPINLOCK(clkgen_a9_lock);
/*
* Common PLL configuration register bits for PLL800 and PLL1600 C65
@@ -38,30 +40,46 @@ static DEFINE_SPINLOCK(clkgena_c32_odf_lock);
#define C32_IDF_MASK (0x7)
#define C32_ODF_MASK (0x3f)
#define C32_LDF_MASK (0x7f)
+#define C32_CP_MASK (0x1f)
#define C32_MAX_ODFS (4)
+/*
+ * PLL configuration register bits for PLL4600 C28
+ */
+#define C28_NDIV_MASK (0xff)
+#define C28_IDF_MASK (0x7)
+#define C28_ODF_MASK (0x3f)
+
struct clkgen_pll_data {
struct clkgen_field pdn_status;
+ struct clkgen_field pdn_ctrl;
struct clkgen_field locked_status;
struct clkgen_field mdiv;
struct clkgen_field ndiv;
struct clkgen_field pdiv;
struct clkgen_field idf;
struct clkgen_field ldf;
+ struct clkgen_field cp;
unsigned int num_odfs;
struct clkgen_field odf[C32_MAX_ODFS];
struct clkgen_field odf_gate[C32_MAX_ODFS];
+ bool switch2pll_en;
+ struct clkgen_field switch2pll;
+ spinlock_t *lock;
const struct clk_ops *ops;
};
static const struct clk_ops st_pll1600c65_ops;
static const struct clk_ops st_pll800c65_ops;
static const struct clk_ops stm_pll3200c32_ops;
+static const struct clk_ops stm_pll3200c32_a9_ops;
static const struct clk_ops st_pll1200c32_ops;
+static const struct clk_ops stm_pll4600c28_ops;
static const struct clkgen_pll_data st_pll1600c65_ax = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
+ .pdn_ctrl = CLKGEN_FIELD(0x10, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
.mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK, 0),
.ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
@@ -70,6 +88,7 @@ static const struct clkgen_pll_data st_pll1600c65_ax = {
static const struct clkgen_pll_data st_pll800c65_ax = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
+ .pdn_ctrl = CLKGEN_FIELD(0xC, 0x1, 1),
.locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
.mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL800_MASK, 0),
.ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
@@ -79,6 +98,7 @@ static const struct clkgen_pll_data st_pll800c65_ax = {
static const struct clkgen_pll_data st_pll3200c32_a1x_0 = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 31),
+ .pdn_ctrl = CLKGEN_FIELD(0x18, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x4, 0x1, 31),
.ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 0x0),
.idf = CLKGEN_FIELD(0x4, C32_IDF_MASK, 0x0),
@@ -96,6 +116,7 @@ static const struct clkgen_pll_data st_pll3200c32_a1x_0 = {
static const struct clkgen_pll_data st_pll3200c32_a1x_1 = {
.pdn_status = CLKGEN_FIELD(0xC, 0x1, 31),
+ .pdn_ctrl = CLKGEN_FIELD(0x18, 0x1, 1),
.locked_status = CLKGEN_FIELD(0x10, 0x1, 31),
.ndiv = CLKGEN_FIELD(0xC, C32_NDIV_MASK, 0x0),
.idf = CLKGEN_FIELD(0x10, C32_IDF_MASK, 0x0),
@@ -114,6 +135,7 @@ static const struct clkgen_pll_data st_pll3200c32_a1x_1 = {
/* 415 specific */
static const struct clkgen_pll_data st_pll3200c32_a9_415 = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
.ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 9),
.idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 22),
@@ -125,6 +147,7 @@ static const struct clkgen_pll_data st_pll3200c32_a9_415 = {
static const struct clkgen_pll_data st_pll3200c32_ddr_415 = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x100, 0x1, 0),
.ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
.idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
@@ -137,7 +160,8 @@ static const struct clkgen_pll_data st_pll3200c32_ddr_415 = {
};
static const struct clkgen_pll_data st_pll1200c32_gpu_415 = {
- .pdn_status = CLKGEN_FIELD(0x144, 0x1, 3),
+ .pdn_status = CLKGEN_FIELD(0x4, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x4, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x168, 0x1, 0),
.ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
.idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
@@ -149,6 +173,7 @@ static const struct clkgen_pll_data st_pll1200c32_gpu_415 = {
/* 416 specific */
static const struct clkgen_pll_data st_pll3200c32_a9_416 = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
.ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
.idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
@@ -160,6 +185,7 @@ static const struct clkgen_pll_data st_pll3200c32_a9_416 = {
static const struct clkgen_pll_data st_pll3200c32_ddr_416 = {
.pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x10C, 0x1, 0),
.ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
.idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
@@ -173,6 +199,7 @@ static const struct clkgen_pll_data st_pll3200c32_ddr_416 = {
static const struct clkgen_pll_data st_pll1200c32_gpu_416 = {
.pdn_status = CLKGEN_FIELD(0x8E4, 0x1, 3),
+ .pdn_ctrl = CLKGEN_FIELD(0x8E4, 0x1, 3),
.locked_status = CLKGEN_FIELD(0x90C, 0x1, 0),
.ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
.idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
@@ -184,6 +211,7 @@ static const struct clkgen_pll_data st_pll1200c32_gpu_416 = {
static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
/* 407 A0 */
.pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8),
+ .pdn_ctrl = CLKGEN_FIELD(0x2a0, 0x1, 8),
.locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
.ndiv = CLKGEN_FIELD(0x2a4, C32_NDIV_MASK, 16),
.idf = CLKGEN_FIELD(0x2a4, C32_IDF_MASK, 0x0),
@@ -196,6 +224,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
static const struct clkgen_pll_data st_pll3200c32_cx_0 = {
/* 407 C0 PLL0 */
.pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8),
+ .pdn_ctrl = CLKGEN_FIELD(0x2a0, 0x1, 8),
.locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
.ndiv = CLKGEN_FIELD(0x2a4, C32_NDIV_MASK, 16),
.idf = CLKGEN_FIELD(0x2a4, C32_IDF_MASK, 0x0),
@@ -208,6 +237,7 @@ static const struct clkgen_pll_data st_pll3200c32_cx_0 = {
static const struct clkgen_pll_data st_pll3200c32_cx_1 = {
/* 407 C0 PLL1 */
.pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8),
+ .pdn_ctrl = CLKGEN_FIELD(0x2c8, 0x1, 8),
.locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24),
.ndiv = CLKGEN_FIELD(0x2cc, C32_NDIV_MASK, 16),
.idf = CLKGEN_FIELD(0x2cc, C32_IDF_MASK, 0x0),
@@ -220,13 +250,34 @@ static const struct clkgen_pll_data st_pll3200c32_cx_1 = {
static const struct clkgen_pll_data st_pll3200c32_407_a9 = {
/* 407 A9 */
.pdn_status = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x1a8, 0x1, 0),
.locked_status = CLKGEN_FIELD(0x87c, 0x1, 0),
.ndiv = CLKGEN_FIELD(0x1b0, C32_NDIV_MASK, 0),
.idf = CLKGEN_FIELD(0x1a8, C32_IDF_MASK, 25),
.num_odfs = 1,
.odf = { CLKGEN_FIELD(0x1b0, C32_ODF_MASK, 8) },
.odf_gate = { CLKGEN_FIELD(0x1ac, 0x1, 28) },
- .ops = &stm_pll3200c32_ops,
+ .switch2pll_en = true,
+ .cp = CLKGEN_FIELD(0x1a8, C32_CP_MASK, 1),
+ .switch2pll = CLKGEN_FIELD(0x1a4, 0x1, 1),
+ .lock = &clkgen_a9_lock,
+ .ops = &stm_pll3200c32_a9_ops,
+};
+
+static struct clkgen_pll_data st_pll4600c28_418_a9 = {
+ /* 418 A9 */
+ .pdn_status = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x87c, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x1b0, C28_NDIV_MASK, 0),
+ .idf = CLKGEN_FIELD(0x1a8, C28_IDF_MASK, 25),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x1b0, C28_ODF_MASK, 8) },
+ .odf_gate = { CLKGEN_FIELD(0x1ac, 0x1, 28) },
+ .switch2pll_en = true,
+ .switch2pll = CLKGEN_FIELD(0x1a4, 0x1, 1),
+ .lock = &clkgen_a9_lock,
+ .ops = &stm_pll4600c28_ops,
};
/**
@@ -252,10 +303,26 @@ struct clkgen_pll {
struct clk_hw hw;
struct clkgen_pll_data *data;
void __iomem *regs_base;
+ spinlock_t *lock;
+
+ u32 ndiv;
+ u32 idf;
+ u32 odf;
+ u32 cp;
};
#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw)
+struct stm_pll {
+ unsigned long mdiv;
+ unsigned long ndiv;
+ unsigned long pdiv;
+ unsigned long odf;
+ unsigned long idf;
+ unsigned long ldf;
+ unsigned long cp;
+};
+
static int clkgen_pll_is_locked(struct clk_hw *hw)
{
struct clkgen_pll *pll = to_clkgen_pll(hw);
@@ -271,6 +338,78 @@ static int clkgen_pll_is_enabled(struct clk_hw *hw)
return !poweroff;
}
+static int __clkgen_pll_enable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ void __iomem *base = pll->regs_base;
+ struct clkgen_field *field = &pll->data->locked_status;
+ int ret = 0;
+ u32 reg;
+
+ if (clkgen_pll_is_enabled(hw))
+ return 0;
+
+ CLKGEN_WRITE(pll, pdn_ctrl, 0);
+
+ ret = readl_relaxed_poll_timeout(base + field->offset, reg,
+ !!((reg >> field->shift) & field->mask), 0, 10000);
+
+ if (!ret) {
+ if (pll->data->switch2pll_en)
+ CLKGEN_WRITE(pll, switch2pll, 0);
+
+ pr_debug("%s:%s enabled\n", __clk_get_name(hw->clk), __func__);
+ }
+
+ return ret;
+}
+
+static int clkgen_pll_enable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ ret = __clkgen_pll_enable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void __clkgen_pll_disable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+
+ if (!clkgen_pll_is_enabled(hw))
+ return;
+
+ if (pll->data->switch2pll_en)
+ CLKGEN_WRITE(pll, switch2pll, 1);
+
+ CLKGEN_WRITE(pll, pdn_ctrl, 1);
+
+ pr_debug("%s:%s disabled\n", __clk_get_name(hw->clk), __func__);
+}
+
+static void clkgen_pll_disable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ __clkgen_pll_disable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
static unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
unsigned long parent_rate)
{
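Editor's note: the enable path above uses readl_relaxed_poll_timeout() from <linux/iopoll.h> instead of an open-coded loop. For readers unfamiliar with the helper, its argument order and behaviour are:

/*
 * readl_relaxed_poll_timeout(addr, val, cond, sleep_us, timeout_us)
 *
 * Re-reads *addr into val until cond becomes true, waiting sleep_us
 * between reads (0 above, i.e. a tight poll), and returns -ETIMEDOUT
 * once timeout_us microseconds (10 ms above) have elapsed, 0 on success.
 */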
@@ -322,6 +461,67 @@ static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
return rate;
}
+static int clk_pll3200c32_get_params(unsigned long input, unsigned long output,
+ struct stm_pll *pll)
+{
+ unsigned long i, n;
+ unsigned long deviation = ~0;
+ unsigned long new_freq;
+ long new_deviation;
+ /* Charge pump table: highest ndiv value for cp=6 to 25 */
+ static const unsigned char cp_table[] = {
+ 48, 56, 64, 72, 80, 88, 96, 104, 112, 120,
+ 128, 136, 144, 152, 160, 168, 176, 184, 192
+ };
+
+ /* Output clock range: 800Mhz to 1600Mhz */
+ if (output < 800000000 || output > 1600000000)
+ return -EINVAL;
+
+ input /= 1000;
+ output /= 1000;
+
+ for (i = 1; i <= 7 && deviation; i++) {
+ n = i * output / (2 * input);
+
+ /* Checks */
+ if (n < 8)
+ continue;
+ if (n > 200)
+ break;
+
+ new_freq = (input * 2 * n) / i;
+
+ new_deviation = abs(new_freq - output);
+
+ if (!new_deviation || new_deviation < deviation) {
+ pll->idf = i;
+ pll->ndiv = n;
+ deviation = new_deviation;
+ }
+ }
+
+ if (deviation == ~0) /* No solution found */
+ return -EINVAL;
+
+ /* Computing recommended charge pump value */
+ for (pll->cp = 6; pll->ndiv > cp_table[pll->cp-6]; (pll->cp)++)
+ ;
+
+ return 0;
+}
+
+static int clk_pll3200c32_get_rate(unsigned long input, struct stm_pll *pll,
+ unsigned long *rate)
+{
+ if (!pll->idf)
+ pll->idf = 1;
+
+ *rate = ((2 * (input / 1000) * pll->ndiv) / pll->idf) * 1000;
+
+ return 0;
+}
+
static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
unsigned long parent_rate)
{
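Editor's note: a worked example of the parameter search above (the numbers are illustrative, not from the patch). With a 30 MHz reference and a 1.2 GHz target, the IDF = 1 iteration yields NDIV = 1 * 1200000 / (2 * 30000) = 20, an exact match, and NDIV = 20 is below the first charge-pump threshold (48), so CP = 6:

struct stm_pll params = { 0 };
unsigned long rate;

if (!clk_pll3200c32_get_params(30000000, 1200000000, &params)) {
	clk_pll3200c32_get_rate(30000000, &params, &rate);
	/* params.idf == 1, params.ndiv == 20, params.cp == 6, rate == 1200000000 */
}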
@@ -344,6 +544,70 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
return rate;
}
+static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_pll params;
+
+ if (!clk_pll3200c32_get_params(*prate, rate, &params))
+ clk_pll3200c32_get_rate(*prate, &params, &rate);
+ else {
+ pr_debug("%s: %s rate %ld Invalid\n", __func__,
+ __clk_get_name(hw->clk), rate);
+ return 0;
+ }
+
+ pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
+ __func__, __clk_get_name(hw->clk),
+ rate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ return rate;
+}
+
+static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ struct stm_pll params;
+ long hwrate = 0;
+ unsigned long flags = 0;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ if (!clk_pll3200c32_get_params(parent_rate, rate, &params))
+ clk_pll3200c32_get_rate(parent_rate, &params, &hwrate);
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x] [idf=0x%x]\n",
+ __func__, __clk_get_name(hw->clk),
+ hwrate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ if (!hwrate)
+ return -EINVAL;
+
+ pll->ndiv = params.ndiv;
+ pll->idf = params.idf;
+ pll->cp = params.cp;
+
+ __clkgen_pll_disable(hw);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+ CLKGEN_WRITE(pll, idf, pll->idf);
+ CLKGEN_WRITE(pll, cp, pll->cp);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ __clkgen_pll_enable(hw);
+
+ return 0;
+}
+
static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -371,30 +635,213 @@ static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
return rate;
}
+/* PLL output structure
+ * FVCO >> /2 >> FVCOBY2 (no output)
+ * |> Divider (ODF) >> PHI
+ *
+ * FVCOby2 output = (input * 2 * NDIV) / IDF (assuming FRAC_CONTROL==L)
+ *
+ * Rules:
+ * 4Mhz <= INFF input <= 350Mhz
+ * 4Mhz <= INFIN (INFF / IDF) <= 50Mhz
+ * 19.05Mhz <= FVCOby2 output (PHI w ODF=1) <= 3000Mhz
+ * 1 <= i (register/dec value for IDF) <= 7
+ * 8 <= n (register/dec value for NDIV) <= 246
+ */
+
+static int clk_pll4600c28_get_params(unsigned long input, unsigned long output,
+ struct stm_pll *pll)
+{
+
+ unsigned long i, infin, n;
+ unsigned long deviation = ~0;
+ unsigned long new_freq, new_deviation;
+
+ /* Output clock range: 19Mhz to 3000Mhz */
+ if (output < 19000000 || output > 3000000000u)
+ return -EINVAL;
+
+ /* For better jitter, IDF should be smallest and NDIV must be maximum */
+ for (i = 1; i <= 7 && deviation; i++) {
+ /* INFIN checks */
+ infin = input / i;
+ if (infin < 4000000 || infin > 50000000)
+ continue; /* Invalid case */
+
+ n = output / (infin * 2);
+ if (n < 8 || n > 246)
+ continue; /* Invalid case */
+ if (n < 246)
+ n++; /* To work around 'y' when n=x.y */
+
+ for (; n >= 8 && deviation; n--) {
+ new_freq = infin * 2 * n;
+ if (new_freq < output)
+ break; /* Optimization: stop the downward scan early */
+
+ new_deviation = new_freq - output;
+ if (!new_deviation || new_deviation < deviation) {
+ pll->idf = i;
+ pll->ndiv = n;
+ deviation = new_deviation;
+ }
+ }
+ }
+
+ if (deviation == ~0) /* No solution found */
+ return -EINVAL;
+
+ return 0;
+}
+
+static int clk_pll4600c28_get_rate(unsigned long input, struct stm_pll *pll,
+ unsigned long *rate)
+{
+ if (!pll->idf)
+ pll->idf = 1;
+
+ *rate = (input / pll->idf) * 2 * pll->ndiv;
+
+ return 0;
+}
+
+static unsigned long recalc_stm_pll4600c28(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ struct stm_pll params;
+ unsigned long rate;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ params.ndiv = CLKGEN_READ(pll, ndiv);
+ params.idf = CLKGEN_READ(pll, idf);
+
+ clk_pll4600c28_get_rate(parent_rate, &params, &rate);
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+}
+
+static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_pll params;
+
+ if (!clk_pll4600c28_get_params(*prate, rate, &params)) {
+ clk_pll4600c28_get_rate(*prate, &params, &rate);
+ } else {
+ pr_debug("%s: %s rate %ld Invalid\n", __func__,
+ __clk_get_name(hw->clk), rate);
+ return 0;
+ }
+
+ pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
+ __func__, __clk_get_name(hw->clk),
+ rate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ return rate;
+}
+
+static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ struct stm_pll params;
+ long hwrate;
+ unsigned long flags = 0;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ if (!clk_pll4600c28_get_params(parent_rate, rate, &params)) {
+ clk_pll4600c28_get_rate(parent_rate, &params, &hwrate);
+ } else {
+ pr_debug("%s: %s rate %ld Invalid\n", __func__,
+ __clk_get_name(hw->clk), rate);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x] [idf=0x%x]\n",
+ __func__, __clk_get_name(hw->clk),
+ hwrate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ if (!hwrate)
+ return -EINVAL;
+
+ pll->ndiv = params.ndiv;
+ pll->idf = params.idf;
+
+ __clkgen_pll_disable(hw);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+ CLKGEN_WRITE(pll, idf, pll->idf);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ __clkgen_pll_enable(hw);
+
+ return 0;
+}
+
static const struct clk_ops st_pll1600c65_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll1600c65,
};
static const struct clk_ops st_pll800c65_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll800c65,
};
static const struct clk_ops stm_pll3200c32_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll3200c32,
};
+static const struct clk_ops stm_pll3200c32_a9_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll3200c32,
+ .round_rate = round_rate_stm_pll3200c32,
+ .set_rate = set_rate_stm_pll3200c32,
+};
+
static const struct clk_ops st_pll1200c32_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll1200c32,
};
+static const struct clk_ops stm_pll4600c28_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll4600c28,
+ .round_rate = round_rate_stm_pll4600c28,
+ .set_rate = set_rate_stm_pll4600c28,
+};
+
static struct clk * __init clkgen_pll_register(const char *parent_name,
struct clkgen_pll_data *pll_data,
void __iomem *reg,
- const char *clk_name)
+ const char *clk_name, spinlock_t *lock)
{
struct clkgen_pll *pll;
struct clk *clk;
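Editor's note: the C28 search added above works the other way round: for each IDF it starts from the largest NDIV that does not undershoot and walks downward until the synthesized frequency would drop below the target. For instance (illustrative numbers), a 30 MHz reference and a 1.5 GHz request give, at IDF = 1, INFIN = 30 MHz and a starting NDIV of 26; NDIV = 25 then hits the target exactly, so the search settles on IDF = 1, NDIV = 25:

struct stm_pll params = { 0 };
unsigned long rate;

if (!clk_pll4600c28_get_params(30000000, 1500000000, &params)) {
	clk_pll4600c28_get_rate(30000000, &params, &rate);
	/* params.idf == 1, params.ndiv == 25, rate == 1500000000 */
}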
@@ -414,6 +861,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
pll->data = pll_data;
pll->regs_base = reg;
pll->hw.init = &init;
+ pll->lock = lock;
clk = clk_register(NULL, &pll->hw);
if (IS_ERR(clk)) {
@@ -500,7 +948,7 @@ static void __init clkgena_c65_pll_setup(struct device_node *np)
*/
clk_data->clks[0] = clkgen_pll_register(parent_name,
(struct clkgen_pll_data *) &st_pll1600c65_ax,
- reg + CLKGENAx_PLL0_OFFSET, clk_name);
+ reg + CLKGENAx_PLL0_OFFSET, clk_name, NULL);
if (IS_ERR(clk_data->clks[0]))
goto err;
@@ -529,7 +977,7 @@ static void __init clkgena_c65_pll_setup(struct device_node *np)
*/
clk_data->clks[2] = clkgen_pll_register(parent_name,
(struct clkgen_pll_data *) &st_pll800c65_ax,
- reg + CLKGENAx_PLL1_OFFSET, clk_name);
+ reg + CLKGENAx_PLL1_OFFSET, clk_name, NULL);
if (IS_ERR(clk_data->clks[2]))
goto err;
@@ -556,7 +1004,7 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
struct clk_gate *gate;
struct clk_divider *div;
- flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE;
+ flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
@@ -635,6 +1083,10 @@ static const struct of_device_id c32_pll_of_match[] = {
.compatible = "st,stih407-plls-c32-a9",
.data = &st_pll3200c32_407_a9,
},
+ {
+ .compatible = "st,stih418-plls-c28-a9",
+ .data = &st_pll4600c28_418_a9,
+ },
{}
};
@@ -664,7 +1116,8 @@ static void __init clkgen_c32_pll_setup(struct device_node *np)
if (!pll_base)
return;
- clk = clkgen_pll_register(parent_name, data, pll_base, np->name);
+ clk = clkgen_pll_register(parent_name, data, pll_base, np->name,
+ data->lock);
if (IS_ERR(clk))
return;
@@ -753,7 +1206,7 @@ static void __init clkgengpu_c32_pll_setup(struct device_node *np)
/*
* PLL 1200MHz output
*/
- clk = clkgen_pll_register(parent_name, data, reg, clk_name);
+ clk = clkgen_pll_register(parent_name, data, reg, clk_name, data->lock);
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h
index 35c863295268..f7ec2d9139d6 100644
--- a/drivers/clk/st/clkgen.h
+++ b/drivers/clk/st/clkgen.h
@@ -9,6 +9,8 @@ Copyright (C) 2014 STMicroelectronics
#ifndef __CLKGEN_INFO_H
#define __CLKGEN_INFO_H
+extern spinlock_t clkgen_a9_lock;
+
struct clkgen_field {
unsigned int offset;
unsigned int mask;
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index f5a35b82cc1a..cb4c299214ce 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -3,7 +3,10 @@
#
obj-y += clk-sunxi.o clk-factors.o
+obj-y += clk-a10-codec.o
obj-y += clk-a10-hosc.o
+obj-y += clk-a10-mod1.o
+obj-y += clk-a10-pll2.o
obj-y += clk-a20-gmac.o
obj-y += clk-mod0.o
obj-y += clk-simple-gates.o
diff --git a/drivers/clk/sunxi/clk-a10-codec.c b/drivers/clk/sunxi/clk-a10-codec.c
new file mode 100644
index 000000000000..ac321d6a0df5
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a10-codec.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2013 Emilio López
+ *
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define SUN4I_CODEC_GATE 31
+
+static void __init sun4i_codec_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name, *parent_name;
+ void __iomem *reg;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg))
+ return;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_gate(NULL, clk_name, parent_name,
+ CLK_SET_RATE_PARENT, reg,
+ SUN4I_CODEC_GATE, 0, NULL);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+CLK_OF_DECLARE(sun4i_codec, "allwinner,sun4i-a10-codec-clk",
+ sun4i_codec_clk_setup);
diff --git a/drivers/clk/sunxi/clk-a10-mod1.c b/drivers/clk/sunxi/clk-a10-mod1.c
new file mode 100644
index 000000000000..e9d870de165c
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a10-mod1.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Emilio López
+ *
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+static DEFINE_SPINLOCK(mod1_lock);
+
+#define SUN4I_MOD1_ENABLE 31
+#define SUN4I_MOD1_MUX 16
+#define SUN4I_MOD1_MUX_WIDTH 2
+#define SUN4I_MOD1_MAX_PARENTS 4
+
+static void __init sun4i_mod1_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_mux *mux;
+ struct clk_gate *gate;
+ const char *parents[4];
+ const char *clk_name = node->name;
+ void __iomem *reg;
+ int i;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg))
+ return;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ goto err_unmap;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_free_mux;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ i = of_clk_parent_fill(node, parents, SUN4I_MOD1_MAX_PARENTS);
+
+ gate->reg = reg;
+ gate->bit_idx = SUN4I_MOD1_ENABLE;
+ gate->lock = &mod1_lock;
+ mux->reg = reg;
+ mux->shift = SUN4I_MOD1_MUX;
+ mux->mask = BIT(SUN4I_MOD1_MUX_WIDTH) - 1;
+ mux->lock = &mod1_lock;
+
+ clk = clk_register_composite(NULL, clk_name, parents, i,
+ &mux->hw, &clk_mux_ops,
+ NULL, NULL,
+ &gate->hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk))
+ goto err_free_gate;
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return;
+
+err_free_gate:
+ kfree(gate);
+err_free_mux:
+ kfree(mux);
+err_unmap:
+ iounmap(reg);
+}
+CLK_OF_DECLARE(sun4i_mod1, "allwinner,sun4i-a10-mod1-clk",
+ sun4i_mod1_clk_setup);
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
new file mode 100644
index 000000000000..5484c31ec568
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2013 Emilio López
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * Copyright 2015 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/sun4i-a10-pll2.h>
+
+#define SUN4I_PLL2_ENABLE 31
+
+#define SUN4I_PLL2_PRE_DIV_SHIFT 0
+#define SUN4I_PLL2_PRE_DIV_WIDTH 5
+#define SUN4I_PLL2_PRE_DIV_MASK GENMASK(SUN4I_PLL2_PRE_DIV_WIDTH - 1, 0)
+
+#define SUN4I_PLL2_N_SHIFT 8
+#define SUN4I_PLL2_N_WIDTH 7
+#define SUN4I_PLL2_N_MASK GENMASK(SUN4I_PLL2_N_WIDTH - 1, 0)
+
+#define SUN4I_PLL2_POST_DIV_SHIFT 26
+#define SUN4I_PLL2_POST_DIV_WIDTH 4
+#define SUN4I_PLL2_POST_DIV_MASK GENMASK(SUN4I_PLL2_POST_DIV_WIDTH - 1, 0)
+
+#define SUN4I_PLL2_POST_DIV_VALUE 4
+
+#define SUN4I_PLL2_OUTPUTS 4
+
+struct sun4i_pll2_data {
+ u32 post_div_offset;
+ u32 pre_div_flags;
+};
+
+static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);
+
+static void __init sun4i_pll2_setup(struct device_node *node,
+ struct sun4i_pll2_data *data)
+{
+ const char *clk_name = node->name, *parent;
+ struct clk **clks, *base_clk, *prediv_clk;
+ struct clk_onecell_data *clk_data;
+ struct clk_multiplier *mult;
+ struct clk_gate *gate;
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg))
+ return;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err_unmap;
+
+ clks = kcalloc(SUN4I_PLL2_OUTPUTS, sizeof(struct clk *), GFP_KERNEL);
+ if (!clks)
+ goto err_free_data;
+
+ parent = of_clk_get_parent_name(node, 0);
+ prediv_clk = clk_register_divider(NULL, "pll2-prediv",
+ parent, 0, reg,
+ SUN4I_PLL2_PRE_DIV_SHIFT,
+ SUN4I_PLL2_PRE_DIV_WIDTH,
+ data->pre_div_flags,
+ &sun4i_a10_pll2_lock);
+ if (!prediv_clk) {
+ pr_err("Couldn't register the prediv clock\n");
+ goto err_free_array;
+ }
+
+ /* Setup the gate part of the PLL2 */
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ goto err_unregister_prediv;
+
+ gate->reg = reg;
+ gate->bit_idx = SUN4I_PLL2_ENABLE;
+ gate->lock = &sun4i_a10_pll2_lock;
+
+ /* Setup the multiplier part of the PLL2 */
+ mult = kzalloc(sizeof(struct clk_multiplier), GFP_KERNEL);
+ if (!mult)
+ goto err_free_gate;
+
+ mult->reg = reg;
+ mult->shift = SUN4I_PLL2_N_SHIFT;
+ mult->width = 7;
+ mult->flags = CLK_MULTIPLIER_ZERO_BYPASS |
+ CLK_MULTIPLIER_ROUND_CLOSEST;
+ mult->lock = &sun4i_a10_pll2_lock;
+
+ parent = __clk_get_name(prediv_clk);
+ base_clk = clk_register_composite(NULL, "pll2-base",
+ &parent, 1,
+ NULL, NULL,
+ &mult->hw, &clk_multiplier_ops,
+ &gate->hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ if (!base_clk) {
+ pr_err("Couldn't register the base multiplier clock\n");
+ goto err_free_multiplier;
+ }
+
+ parent = __clk_get_name(base_clk);
+
+ /*
+ * PLL2-1x
+ *
+ * This is supposed to have a post divider, but we won't need
+ * to use it, we just need to initialise it to 4, and use a
+ * fixed divider.
+ */
+ val = readl(reg);
+ val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
+ val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
+ writel(val, reg);
+
+ of_property_read_string_index(node, "clock-output-names",
+ SUN4I_A10_PLL2_1X, &clk_name);
+ clks[SUN4I_A10_PLL2_1X] = clk_register_fixed_factor(NULL, clk_name,
+ parent,
+ CLK_SET_RATE_PARENT,
+ 1,
+ SUN4I_PLL2_POST_DIV_VALUE);
+ WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_1X]));
+
+ /*
+ * PLL2-2x
+ *
+ * This clock doesn't use the post divider, and really is just
+ * a fixed divider from the PLL2 base clock.
+ */
+ of_property_read_string_index(node, "clock-output-names",
+ SUN4I_A10_PLL2_2X, &clk_name);
+ clks[SUN4I_A10_PLL2_2X] = clk_register_fixed_factor(NULL, clk_name,
+ parent,
+ CLK_SET_RATE_PARENT,
+ 1, 2);
+ WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_2X]));
+
+ /* PLL2-4x */
+ of_property_read_string_index(node, "clock-output-names",
+ SUN4I_A10_PLL2_4X, &clk_name);
+ clks[SUN4I_A10_PLL2_4X] = clk_register_fixed_factor(NULL, clk_name,
+ parent,
+ CLK_SET_RATE_PARENT,
+ 1, 1);
+ WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_4X]));
+
+ /* PLL2-8x */
+ of_property_read_string_index(node, "clock-output-names",
+ SUN4I_A10_PLL2_8X, &clk_name);
+ clks[SUN4I_A10_PLL2_8X] = clk_register_fixed_factor(NULL, clk_name,
+ parent,
+ CLK_SET_RATE_PARENT,
+ 2, 1);
+ WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_8X]));
+
+ clk_data->clks = clks;
+ clk_data->clk_num = SUN4I_PLL2_OUTPUTS;
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ return;
+
+err_free_multiplier:
+ kfree(mult);
+err_free_gate:
+ kfree(gate);
+err_unregister_prediv:
+ clk_unregister_divider(prediv_clk);
+err_free_array:
+ kfree(clks);
+err_free_data:
+ kfree(clk_data);
+err_unmap:
+ iounmap(reg);
+}
+
+static struct sun4i_pll2_data sun4i_a10_pll2_data = {
+ .pre_div_flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+};
+
+static void __init sun4i_a10_pll2_setup(struct device_node *node)
+{
+ sun4i_pll2_setup(node, &sun4i_a10_pll2_data);
+}
+
+CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
+ sun4i_a10_pll2_setup);
+
+static struct sun4i_pll2_data sun5i_a13_pll2_data = {
+ .post_div_offset = 1,
+};
+
+static void __init sun5i_a13_pll2_setup(struct device_node *node)
+{
+ sun4i_pll2_setup(node, &sun5i_a13_pll2_data);
+}
+
+CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
+ sun5i_a13_pll2_setup);
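Editor's note: taken together, the registrations above build the small tree sketched below. The only writable hardware fields are the pre-divider, the multiplier N and the gate; the post divider is forced to 4 and the outputs are modelled with fixed-factor clocks (the sample rates are illustrative arithmetic, not values from the patch):

/*
 *  parent --/prediv--> pll2-prediv --xN, gate--> pll2-base
 *      pll2-base / 4  ->  PLL2-1X   (hardware post divider pinned to 4)
 *      pll2-base / 2  ->  PLL2-2X
 *      pll2-base / 1  ->  PLL2-4X
 *      pll2-base * 2  ->  PLL2-8X
 *
 * e.g. a 24 MHz parent with prediv = 21 and N = 86 gives a base of about
 * 98.29 MHz, i.e. roughly 24.57 MHz on PLL2-1X.
 */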
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index 6ce91180da1b..0214c6548afd 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -128,6 +128,8 @@ CLK_OF_DECLARE(sun8i_a23_apb1, "allwinner,sun8i-a23-apb1-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a33_ahb1, "allwinner,sun8i-a33-ahb1-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 64f3e46d383c..23d042aabb4f 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -34,6 +34,7 @@ static const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
{ .compatible = "allwinner,sun8i-a23-apb0-gates-clk", .data = &sun8i_a23_apb0_gates },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sun6i_a31_apb0_gates_clk_dt_ids);
static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
index 70763600aeae..e703e1895b76 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -61,6 +61,7 @@ static const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-apb0-clk" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sun6i_a31_apb0_clk_dt_ids);
static struct platform_driver sun6i_a31_apb0_clk_driver = {
.driver = {
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 806fd019c05d..20887686bdbe 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -219,6 +219,7 @@ static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-ar100-clk" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sun6i_a31_ar100_clk_dt_ids);
static struct platform_driver sun6i_a31_ar100_clk_driver = {
.driver = {
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index 155d0022194f..7ae5d2c2cde1 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -52,6 +52,7 @@ static const struct of_device_id sun8i_a23_apb0_clk_dt_ids[] = {
{ .compatible = "allwinner,sun8i-a23-apb0-clk" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sun8i_a23_apb0_clk_dt_ids);
static struct platform_driver sun8i_a23_apb0_clk_driver = {
.driver = {
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 3436a948b796..a9b176139aca 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -204,6 +204,7 @@ static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
{ .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sun9i_a80_mmc_config_clk_dt_ids);
static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
.driver = {
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index c4e3a52e225b..86a307b17eb0 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -469,56 +469,6 @@ static unsigned long dfll_scale_dvco_rate(int scale_bits,
}
/*
- * Monitor control
- */
-
-/**
- * dfll_calc_monitored_rate - convert DFLL_MONITOR_DATA_VAL rate into real freq
- * @monitor_data: value read from the DFLL_MONITOR_DATA_VAL bitfield
- * @ref_rate: DFLL reference clock rate
- *
- * Convert @monitor_data from DFLL_MONITOR_DATA_VAL units into cycles
- * per second. Returns the converted value.
- */
-static u64 dfll_calc_monitored_rate(u32 monitor_data,
- unsigned long ref_rate)
-{
- return monitor_data * (ref_rate / REF_CLK_CYC_PER_DVCO_SAMPLE);
-}
-
-/**
- * dfll_read_monitor_rate - return the DFLL's output rate from internal monitor
- * @td: DFLL instance
- *
- * If the DFLL is enabled, return the last rate reported by the DFLL's
- * internal monitoring hardware. This works in both open-loop and
- * closed-loop mode, and takes the output scaler setting into account.
- * Assumes that the monitor was programmed to monitor frequency before
- * the sample period started. If the driver believes that the DFLL is
- * currently uninitialized or disabled, it will return 0, since
- * otherwise the DFLL monitor data register will return the last
- * measured rate from when the DFLL was active.
- */
-static u64 dfll_read_monitor_rate(struct tegra_dfll *td)
-{
- u32 v, s;
- u64 pre_scaler_rate, post_scaler_rate;
-
- if (!dfll_is_running(td))
- return 0;
-
- v = dfll_readl(td, DFLL_MONITOR_DATA);
- v = (v & DFLL_MONITOR_DATA_VAL_MASK) >> DFLL_MONITOR_DATA_VAL_SHIFT;
- pre_scaler_rate = dfll_calc_monitored_rate(v, td->ref_rate);
-
- s = dfll_readl(td, DFLL_FREQ_REQ);
- s = (s & DFLL_FREQ_REQ_SCALE_MASK) >> DFLL_FREQ_REQ_SCALE_SHIFT;
- post_scaler_rate = dfll_scale_dvco_rate(s, pre_scaler_rate);
-
- return post_scaler_rate;
-}
-
-/*
* DFLL mode switching
*/
@@ -1006,24 +956,25 @@ static unsigned long dfll_clk_recalc_rate(struct clk_hw *hw,
return td->last_unrounded_rate;
}
-static long dfll_clk_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+/* Must use determine_rate since it allows for rates exceeding 2^31-1 */
+static int dfll_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *clk_req)
{
struct tegra_dfll *td = clk_hw_to_dfll(hw);
struct dfll_rate_req req;
int ret;
- ret = dfll_calculate_rate_request(td, &req, rate);
+ ret = dfll_calculate_rate_request(td, &req, clk_req->rate);
if (ret)
return ret;
/*
- * Don't return the rounded rate, since it doesn't really matter as
+ * Don't set the rounded rate, since it doesn't really matter as
* the output rate will be voltage controlled anyway, and cpufreq
* freaks out if any rounding happens.
*/
- return rate;
+
+ return 0;
}
static int dfll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1039,7 +990,7 @@ static const struct clk_ops dfll_clk_ops = {
.enable = dfll_clk_enable,
.disable = dfll_clk_disable,
.recalc_rate = dfll_clk_recalc_rate,
- .round_rate = dfll_clk_round_rate,
+ .determine_rate = dfll_clk_determine_rate,
.set_rate = dfll_clk_set_rate,
};
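Editor's note: switching from .round_rate to .determine_rate above is the standard fix when a clock can legitimately run above 2^31 - 1 Hz: round_rate() returns a long, which on 32-bit systems cannot hold such rates, while determine_rate() reports status as an int and carries the rate in a struct clk_rate_request. A minimal sketch of the pattern (the function name and FOO_MAX_RATE are made up for illustration):

static int foo_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* adjust req->rate in place; there is no long return value to overflow */
	if (req->rate > FOO_MAX_RATE)
		req->rate = FOO_MAX_RATE;

	return 0;
}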
@@ -1101,6 +1052,55 @@ static void dfll_unregister_clk(struct tegra_dfll *td)
*/
#ifdef CONFIG_DEBUG_FS
+/*
+ * Monitor control
+ */
+
+/**
+ * dfll_calc_monitored_rate - convert DFLL_MONITOR_DATA_VAL rate into real freq
+ * @monitor_data: value read from the DFLL_MONITOR_DATA_VAL bitfield
+ * @ref_rate: DFLL reference clock rate
+ *
+ * Convert @monitor_data from DFLL_MONITOR_DATA_VAL units into cycles
+ * per second. Returns the converted value.
+ */
+static u64 dfll_calc_monitored_rate(u32 monitor_data,
+ unsigned long ref_rate)
+{
+ return monitor_data * (ref_rate / REF_CLK_CYC_PER_DVCO_SAMPLE);
+}
+
+/**
+ * dfll_read_monitor_rate - return the DFLL's output rate from internal monitor
+ * @td: DFLL instance
+ *
+ * If the DFLL is enabled, return the last rate reported by the DFLL's
+ * internal monitoring hardware. This works in both open-loop and
+ * closed-loop mode, and takes the output scaler setting into account.
+ * Assumes that the monitor was programmed to monitor frequency before
+ * the sample period started. If the driver believes that the DFLL is
+ * currently uninitialized or disabled, it will return 0, since
+ * otherwise the DFLL monitor data register will return the last
+ * measured rate from when the DFLL was active.
+ */
+static u64 dfll_read_monitor_rate(struct tegra_dfll *td)
+{
+ u32 v, s;
+ u64 pre_scaler_rate, post_scaler_rate;
+
+ if (!dfll_is_running(td))
+ return 0;
+
+ v = dfll_readl(td, DFLL_MONITOR_DATA);
+ v = (v & DFLL_MONITOR_DATA_VAL_MASK) >> DFLL_MONITOR_DATA_VAL_SHIFT;
+ pre_scaler_rate = dfll_calc_monitored_rate(v, td->ref_rate);
+
+ s = dfll_readl(td, DFLL_FREQ_REQ);
+ s = (s & DFLL_FREQ_REQ_SCALE_MASK) >> DFLL_FREQ_REQ_SCALE_SHIFT;
+ post_scaler_rate = dfll_scale_dvco_rate(s, pre_scaler_rate);
+
+ return post_scaler_rate;
+}
static int attr_enable_get(void *data, u64 *val)
{
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 138a94b99b5b..e1fe8f35d45c 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -491,10 +491,8 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
for_each_child_of_node(np, node) {
err = of_property_read_u32(node, "nvidia,ram-code",
&node_ram_code);
- if (err) {
- of_node_put(node);
+ if (err)
continue;
- }
/*
* Store timings for all ram codes as we cannot read the
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
index 11e3ad7ad7a3..e2bfa9b368f6 100644
--- a/drivers/clk/tegra/clk-tegra-audio.c
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -125,18 +125,29 @@ static struct tegra_audio2x_clk_initdata audio2x_clks[] = {
void __init tegra_audio_clk_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
- struct tegra_clk_pll_params *pll_a_params)
+ struct tegra_audio_clk_info *audio_info,
+ unsigned int num_plls)
{
struct clk *clk;
struct clk **dt_clk;
int i;
- /* PLLA */
- dt_clk = tegra_lookup_dt_id(tegra_clk_pll_a, tegra_clks);
- if (dt_clk) {
- clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base,
- pmc_base, 0, pll_a_params, NULL);
- *dt_clk = clk;
+ if (!audio_info || num_plls < 1) {
+ pr_err("No audio data passed to tegra_audio_clk_init\n");
+ WARN_ON(1);
+ return;
+ }
+
+ for (i = 0; i < num_plls; i++) {
+ struct tegra_audio_clk_info *info = &audio_info[i];
+
+ dt_clk = tegra_lookup_dt_id(info->clk_id, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_pll(info->name, info->parent,
+ clk_base, pmc_base, 0, info->pll_params,
+ NULL);
+ *dt_clk = clk;
+ }
}
/* PLLA_OUT0 */
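Editor's note: tegra_audio_clk_init() now takes an array of audio PLL descriptors instead of a single pll_a_params pointer, so SoCs with more than one audio PLL can reuse it. The descriptor layout can be inferred from the initializers added in the Tegra114/124/30 hunks below; this is a sketch only, the real definition lives in drivers/clk/tegra/clk.h:

struct tegra_audio_clk_info {
	const char *name;			/* e.g. "pll_a" */
	struct tegra_clk_pll_params *pll_params;
	int clk_id;				/* e.g. tegra_clk_pll_a */
	const char *parent;			/* e.g. "pll_p_out1" */
};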
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index db5871519bf5..b7d03e9add97 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -933,6 +933,10 @@ static u32 mux_pllm_pllc2_c_c3_pllp_plla_idx[] = {
[0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
};
+static struct tegra_audio_clk_info tegra114_audio_plls[] = {
+ { "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
+};
+
static struct clk **clks;
static unsigned long osc_freq;
@@ -1481,7 +1485,9 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra114_fixed_clk_init(clk_base);
tegra114_pll_init(clk_base, pmc_base);
tegra114_periph_clk_init(clk_base, pmc_base);
- tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks, &pll_a_params);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
+ tegra114_audio_plls,
+ ARRAY_SIZE(tegra114_audio_plls));
tegra_pmc_clk_init(pmc_base, tegra114_clks);
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 824d75883d2b..87975f7adddc 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1417,6 +1417,10 @@ static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
};
+static struct tegra_audio_clk_info tegra124_audio_plls[] = {
+ { "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
+};
+
/**
* tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
*
@@ -1555,7 +1559,9 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
tegra_fixed_clk_init(tegra124_clks);
tegra124_pll_init(clk_base, pmc_base);
tegra124_periph_clk_init(clk_base, pmc_base);
- tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
+ tegra124_audio_plls,
+ ARRAY_SIZE(tegra124_audio_plls));
tegra_pmc_clk_init(pmc_base, tegra124_clks);
/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index fad561a5896b..b90db615c29e 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1405,6 +1405,10 @@ static const struct of_device_id pmc_match[] __initconst = {
{},
};
+static struct tegra_audio_clk_info tegra30_audio_plls[] = {
+ { "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
+};
+
static void __init tegra30_clock_init(struct device_node *np)
{
struct device_node *node;
@@ -1442,7 +1446,9 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra30_pll_init();
tegra30_super_clk_init();
tegra30_periph_clk_init();
- tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks, &pll_a_params);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
+ tegra30_audio_plls,
+ ARRAY_SIZE(tegra30_audio_plls));
tegra_pmc_clk_init(pmc_base, tegra30_clks);
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 0621887e06f7..5d2678914160 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -157,7 +157,7 @@ struct div_nmp {
};
/**
- * struct clk_pll_params - PLL parameters
+ * struct tegra_clk_pll_params - PLL parameters
*
* @input_min: Minimum input frequency
* @input_max: Maximum input frequency
@@ -168,9 +168,45 @@ struct div_nmp {
* @base_reg: PLL base reg offset
* @misc_reg: PLL misc reg offset
* @lock_reg: PLL lock reg offset
- * @lock_bit_idx: Bit index for PLL lock status
+ * @lock_mask: Bitmask for PLL lock status
* @lock_enable_bit_idx: Bit index to enable PLL lock
+ * @iddq_reg: PLL IDDQ register offset
+ * @iddq_bit_idx: Bit index to enable PLL IDDQ
+ * @aux_reg: AUX register offset
+ * @dyn_ramp_reg: Dynamic ramp control register offset
+ * @ext_misc_reg: Miscellaneous control register offsets
+ * @pmc_divnm_reg: n, m divider PMC override register offset (PLLM)
+ * @pmc_divp_reg: p divider PMC override register offset (PLLM)
+ * @flags: PLL flags
+ * @stepa_shift: Dynamic ramp step A field shift
+ * @stepb_shift: Dynamic ramp step B field shift
* @lock_delay: Delay in us if PLL lock is not used
+ * @max_p: maximum value for the p divider
+ * @pdiv_tohw: mapping of p divider to register values
+ * @div_nmp: offsets and widths of the n, m and p fields
+ * @freq_table: array of frequencies supported by PLL
+ * @fixed_rate: PLL rate if it is fixed
+ *
+ * Flags:
+ * TEGRA_PLL_USE_LOCK - This flag indicates that the lock bits are used for
+ * PLL locking. If not set, the lock_delay value is used to wait.
+ * TEGRA_PLL_HAS_CPCON - This flag indicates that CPCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLL_SET_LFCON - This flag indicates that LFCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLL_SET_DCCON - This flag indicates that DCCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLLU - PLLU has an inverted post divider. This flag indicates
+ * that it is PLLU and that the post divider value is inverted.
+ * TEGRA_PLLM - PLLM has additional override settings in the PMC. This
+ * flag indicates that it is PLLM and that the override settings are used.
+ * TEGRA_PLL_FIXED - The output frequency of this PLL must not be
+ * changed.
+ * TEGRA_PLLE_CONFIGURE - Configure PLLE when enabling.
+ * TEGRA_PLL_LOCK_MISC - Lock bit is in the misc register instead of the
+ * base register.
+ * TEGRA_PLL_BYPASS - PLL has bypass bit
+ * TEGRA_PLL_HAS_LOCK_ENABLE - PLL has bit to enable lock monitoring
*/
struct tegra_clk_pll_params {
unsigned long input_min;
@@ -203,38 +239,26 @@ struct tegra_clk_pll_params {
unsigned long fixed_rate;
};
+#define TEGRA_PLL_USE_LOCK BIT(0)
+#define TEGRA_PLL_HAS_CPCON BIT(1)
+#define TEGRA_PLL_SET_LFCON BIT(2)
+#define TEGRA_PLL_SET_DCCON BIT(3)
+#define TEGRA_PLLU BIT(4)
+#define TEGRA_PLLM BIT(5)
+#define TEGRA_PLL_FIXED BIT(6)
+#define TEGRA_PLLE_CONFIGURE BIT(7)
+#define TEGRA_PLL_LOCK_MISC BIT(8)
+#define TEGRA_PLL_BYPASS BIT(9)
+#define TEGRA_PLL_HAS_LOCK_ENABLE BIT(10)
+
/**
* struct tegra_clk_pll - Tegra PLL clock
*
* @hw: handle between common and hardware-specific interfaces
* @clk_base: address of CAR controller
* @pmc: address of PMC, required to read override bits
- * @freq_table: array of frequencies supported by PLL
- * @params: PLL parameters
- * @flags: PLL flags
- * @fixed_rate: PLL rate if it is fixed
* @lock: register lock
- *
- * Flags:
- * TEGRA_PLL_USE_LOCK - This flag indicated to use lock bits for
- * PLL locking. If not set it will use lock_delay value to wait.
- * TEGRA_PLL_HAS_CPCON - This flag indicates that CPCON value needs
- * to be programmed to change output frequency of the PLL.
- * TEGRA_PLL_SET_LFCON - This flag indicates that LFCON value needs
- * to be programmed to change output frequency of the PLL.
- * TEGRA_PLL_SET_DCCON - This flag indicates that DCCON value needs
- * to be programmed to change output frequency of the PLL.
- * TEGRA_PLLU - PLLU has inverted post divider. This flags indicated
- * that it is PLLU and invert post divider value.
- * TEGRA_PLLM - PLLM has additional override settings in PMC. This
- * flag indicates that it is PLLM and use override settings.
- * TEGRA_PLL_FIXED - We are not supposed to change output frequency
- * of some plls.
- * TEGRA_PLLE_CONFIGURE - Configure PLLE when enabling.
- * TEGRA_PLL_LOCK_MISC - Lock bit is in the misc register instead of the
- * base register.
- * TEGRA_PLL_BYPASS - PLL has bypass bit
- * TEGRA_PLL_HAS_LOCK_ENABLE - PLL has bit to enable lock monitoring
+ * @params: PLL parameters
*/
struct tegra_clk_pll {
struct clk_hw hw;
@@ -246,17 +270,20 @@ struct tegra_clk_pll {
#define to_clk_pll(_hw) container_of(_hw, struct tegra_clk_pll, hw)
-#define TEGRA_PLL_USE_LOCK BIT(0)
-#define TEGRA_PLL_HAS_CPCON BIT(1)
-#define TEGRA_PLL_SET_LFCON BIT(2)
-#define TEGRA_PLL_SET_DCCON BIT(3)
-#define TEGRA_PLLU BIT(4)
-#define TEGRA_PLLM BIT(5)
-#define TEGRA_PLL_FIXED BIT(6)
-#define TEGRA_PLLE_CONFIGURE BIT(7)
-#define TEGRA_PLL_LOCK_MISC BIT(8)
-#define TEGRA_PLL_BYPASS BIT(9)
-#define TEGRA_PLL_HAS_LOCK_ENABLE BIT(10)
+/**
+ * struct tegra_audio_clk_info - Tegra Audio Clk Information
+ *
+ * @name: name for the audio pll
+ * @pll_params: pll_params for audio pll
+ * @clk_id: clk id of the audio pll
+ * @parent: name of the parent of the audio pll
+ */
+struct tegra_audio_clk_info {
+ char *name;
+ struct tegra_clk_pll_params *pll_params;
+ int clk_id;
+ char *parent;
+};
extern const struct clk_ops tegra_clk_pll_ops;
extern const struct clk_ops tegra_clk_plle_ops;
@@ -610,7 +637,8 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
void tegra_audio_clk_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
- struct tegra_clk_pll_params *pll_params);
+ struct tegra_audio_clk_info *audio_info,
+ unsigned int num_plls);
void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
struct tegra_clk *tegra_clks,
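For orientation, a minimal sketch of how the fields and flags documented above combine with the new struct tegra_audio_clk_info in a per-SoC clock driver; the example_* names and register values below are invented for illustration and do not appear in this patch:

static struct tegra_clk_pll_params example_pll_a_params = {
	.input_min = 2000000,			/* @input_min */
	.input_max = 31000000,			/* @input_max */
	.base_reg = 0xb0,			/* @base_reg */
	.misc_reg = 0xbc,			/* @misc_reg */
	.lock_mask = BIT(27),			/* @lock_mask */
	.lock_delay = 300,			/* @lock_delay, in us */
	.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};

static struct tegra_audio_clk_info example_audio_plls[] = {
	/* name,   pll_params,            clk_id,          parent */
	{ "pll_a", &example_pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
};

/* from the SoC's clock_init path (example_clks assumed defined elsewhere): */
tegra_audio_clk_init(clk_base, pmc_base, example_clks,
		     example_audio_plls, ARRAY_SIZE(example_audio_plls));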
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index 0204e0861134..69c74eec3a4b 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -78,13 +78,6 @@ static int build_opp_table(const struct cvb_table *d,
if (!table->freq || (table->freq > max_freq))
break;
- /*
- * FIXME after clk_round_rate/clk_determine_rate prototypes
- * have been updated
- */
- if (table->freq & (1<<31))
- continue;
-
dfll_mv = get_cvb_voltage(
speedo_value, d->speedo_scale, &table->coefficients);
dfll_mv = round_cvb_voltage(dfll_mv, d->voltage_scale, align);
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index 1530c9352a76..fc50b6264bed 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -1,6 +1,6 @@
config COMMON_CLK_VERSATILE
bool "Clock driver for ARM Reference designs"
- depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64
+ depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64 || COMPILE_TEST
---help---
Supports clocking on ARM Reference designs:
- Integrator/AP and Integrator/CP
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index a3893ea2199d..08c5ee976879 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -157,8 +157,10 @@ struct clk *icst_clk_register(struct device *dev,
icst->lockreg = base + desc->lock_offset;
clk = clk_register(dev, &icst->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ kfree(pclone);
kfree(icst);
+ }
return clk;
}
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index c8d794c58479..eac76a79a880 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -103,7 +103,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
/**
* efi_pstore_scan_sysfs_enter
- * @entry: scanning entry
+ * @pos: scanning entry
* @next: next entry
* @head: list head
*/
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e3d968f751f1..60172f835d15 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -183,7 +183,6 @@ MODULE_DEVICE_TABLE(of, gen_74x164_dt_ids);
static struct spi_driver gen_74x164_driver = {
.driver = {
.name = "74x164",
- .owner = THIS_MODULE,
.of_match_table = gen_74x164_dt_ids,
},
.probe = gen_74x164_probe,
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 6e1c984a75d4..05813fbf3daf 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -87,7 +87,6 @@ MODULE_DEVICE_TABLE(spi, max7301_id);
static struct spi_driver max7301_driver = {
.driver = {
.name = "max7301",
- .owner = THIS_MODULE,
},
.probe = max7301_probe,
.remove = max7301_remove,
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index a431604c9e67..2853731db5bc 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -163,7 +163,6 @@ static int mc33880_remove(struct spi_device *spi)
static struct spi_driver mc33880_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = mc33880_probe,
.remove = mc33880_remove,
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 73db7ecd7ffd..4a41694919da 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -848,7 +848,6 @@ MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
static struct i2c_driver mcp230xx_driver = {
.driver = {
.name = "mcp230xx",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mcp23s08_i2c_of_match),
},
.probe = mcp230xx_probe,
@@ -1021,7 +1020,6 @@ static struct spi_driver mcp23s08_driver = {
.id_table = mcp23s08_ids,
.driver = {
.name = "mcp23s08",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mcp23s08_spi_of_match),
},
};
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index a7b4939cee6d..6989238b276a 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -287,7 +287,6 @@ static struct spi_driver lg4573_driver = {
.remove = lg4573_remove,
.driver = {
.name = "lg4573",
- .owner = THIS_MODULE,
.of_match_table = lg4573_of_match,
},
};
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index b202377135e7..3cf4cf6a6942 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -378,7 +378,6 @@ static struct spi_driver ld9040_driver = {
.remove = ld9040_remove,
.driver = {
.name = "panel-samsung-ld9040",
- .owner = THIS_MODULE,
.of_match_table = ld9040_of_match,
},
};
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index e5c7a969f28b..a38af68cf326 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -783,7 +783,7 @@ static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
}
ssip_set_rxstate(ssi, RECEIVING);
if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
- dev_err(&cl->device, "START TRANS id %d expeceted %d\n",
+ dev_err(&cl->device, "START TRANS id %d expected %d\n",
SSIP_MSG_ID(cmd), ssi->rxid);
spin_unlock(&ssi->lock);
goto out1;
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
index 089c6c3feb3e..f6d3100b7a32 100644
--- a/drivers/hsi/controllers/omap_ssi.c
+++ b/drivers/hsi/controllers/omap_ssi.c
@@ -295,27 +295,14 @@ static int __init ssi_get_iomem(struct platform_device *pd,
const char *name, void __iomem **pbase, dma_addr_t *phy)
{
struct resource *mem;
- struct resource *ioarea;
void __iomem *base;
struct hsi_controller *ssi = platform_get_drvdata(pd);
mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
- if (!mem) {
- dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
- return -ENXIO;
- }
- ioarea = devm_request_mem_region(&ssi->device, mem->start,
- resource_size(mem), dev_name(&pd->dev));
- if (!ioarea) {
- dev_err(&pd->dev, "%s IO memory region request failed\n",
- mem->name);
- return -ENXIO;
- }
- base = devm_ioremap(&ssi->device, mem->start, resource_size(mem));
- if (!base) {
- dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
- return -ENXIO;
- }
+ base = devm_ioremap_resource(&ssi->device, mem);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
*pbase = base;
if (phy)
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 1f8652b3de06..02e66032ae73 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1111,7 +1111,7 @@ static int __init ssi_port_probe(struct platform_device *pd)
struct omap_ssi_port *omap_port;
struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
- u32 cawake_gpio = 0;
+ int cawake_gpio = 0;
u32 port_id;
int err;
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index fe9371271ce2..df380d55c58f 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -85,12 +85,14 @@ struct hsi_client *hsi_new_client(struct hsi_port *port,
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
- return NULL;
+ goto err;
cl->tx_cfg = info->tx_cfg;
if (cl->tx_cfg.channels) {
size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels);
cl->tx_cfg.channels = kzalloc(size , GFP_KERNEL);
+ if (!cl->tx_cfg.channels)
+ goto err_tx;
memcpy(cl->tx_cfg.channels, info->tx_cfg.channels, size);
}
@@ -98,6 +100,8 @@ struct hsi_client *hsi_new_client(struct hsi_port *port,
if (cl->rx_cfg.channels) {
size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels);
cl->rx_cfg.channels = kzalloc(size , GFP_KERNEL);
+ if (!cl->rx_cfg.channels)
+ goto err_rx;
memcpy(cl->rx_cfg.channels, info->rx_cfg.channels, size);
}
@@ -114,6 +118,12 @@ struct hsi_client *hsi_new_client(struct hsi_port *port,
}
return cl;
+err_rx:
+ kfree(cl->tx_cfg.channels);
+err_tx:
+ kfree(cl);
+err:
+ return NULL;
}
EXPORT_SYMBOL_GPL(hsi_new_client);
@@ -300,7 +310,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
if (device_register(&cl->device) < 0) {
pr_err("hsi: failed to register client: %s\n", name);
put_device(&cl->device);
- goto err3;
}
return;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3d70e36c918e..3782636562a1 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -63,9 +63,6 @@ enum hv_cpuid_function {
/* Define version of the synthetic interrupt controller. */
#define HV_SYNIC_VERSION (1)
-/* Define the expected SynIC version. */
-#define HV_SYNIC_VERSION_1 (0x1)
-
/* Define synthetic interrupt controller message constants. */
#define HV_MESSAGE_SIZE (256)
#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
@@ -105,8 +102,6 @@ enum hv_message_type {
HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
};
-/* Define the number of synthetic interrupt sources. */
-#define HV_SYNIC_SINT_COUNT (16)
#define HV_SYNIC_STIMER_COUNT (4)
/* Define invalid partition identifier. */
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 11955467fc0f..202c1fbb3407 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -157,7 +157,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .owner = THIS_MODULE,
},
.probe = ad7314_probe,
.remove = ad7314_remove,
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index 04c08c2f79b8..69e0bb97e597 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -234,7 +234,6 @@ MODULE_DEVICE_TABLE(spi, adcxx_ids);
static struct spi_driver adcxx_driver = {
.driver = {
.name = "adcxx",
- .owner = THIS_MODULE,
},
.id_table = adcxx_ids,
.probe = adcxx_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 3eff73b6220d..4fd9e4de1972 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -237,7 +237,6 @@ static int ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .owner = THIS_MODULE,
},
.probe = ads7871_probe,
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index 5994cf68e0a4..ec02f4f0d67a 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -104,7 +104,6 @@ MODULE_DEVICE_TABLE(spi, adt7310_id);
static struct spi_driver adt7310_driver = {
.driver = {
.name = "adt7310",
- .owner = THIS_MODULE,
.pm = ADT7X10_DEV_PM_OPS,
},
.probe = adt7310_spi_probe,
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 9296e9daf774..583f883a4cfe 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -199,7 +199,6 @@ MODULE_DEVICE_TABLE(spi, lm70_ids);
static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lm70_of_ids),
},
.id_table = lm70_ids,
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index f67d71ee8386..36544c4f653c 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -277,7 +277,6 @@ MODULE_DEVICE_TABLE(spi, max1111_ids);
static struct spi_driver max1111_driver = {
.driver = {
.name = "max1111",
- .owner = THIS_MODULE,
},
.id_table = max1111_ids,
.probe = max1111_probe,
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 98ba761cbb9c..923f56598d4b 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -263,7 +263,6 @@ MODULE_DEVICE_TABLE(spi, kxsd9_id);
static struct spi_driver kxsd9_driver = {
.driver = {
.name = "kxsd9",
- .owner = THIS_MODULE,
},
.probe = kxsd9_probe,
.remove = kxsd9_remove,
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 54b61a3961c3..f71b0d391272 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -64,7 +64,6 @@ MODULE_DEVICE_TABLE(spi, st_accel_id_table);
static struct spi_driver st_accel_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-accel-spi",
},
.probe = st_accel_spi_probe,
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 70f78c3062a7..21e19b60e2b9 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -509,7 +509,6 @@ MODULE_DEVICE_TABLE(spi, ad7266_id);
static struct spi_driver ad7266_driver = {
.driver = {
.name = "ad7266",
- .owner = THIS_MODULE,
},
.probe = ad7266_probe,
.remove = ad7266_remove,
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 4a8c0a2f49b6..62bb8f7ce4a0 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -378,7 +378,6 @@ MODULE_DEVICE_TABLE(spi, ad7298_id);
static struct spi_driver ad7298_driver = {
.driver = {
.name = "ad7298",
- .owner = THIS_MODULE,
},
.probe = ad7298_probe,
.remove = ad7298_remove,
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index ce400ec176f1..be85c2a0ad97 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -302,7 +302,6 @@ MODULE_DEVICE_TABLE(spi, ad7476_id);
static struct spi_driver ad7476_driver = {
.driver = {
.name = "ad7476",
- .owner = THIS_MODULE,
},
.probe = ad7476_probe,
.remove = ad7476_remove,
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index c19f8fd1b4b7..cf172d58cd44 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -440,7 +440,6 @@ MODULE_DEVICE_TABLE(spi, ad7791_spi_ids);
static struct spi_driver ad7791_driver = {
.driver = {
.name = "ad7791",
- .owner = THIS_MODULE,
},
.probe = ad7791_probe,
.remove = ad7791_remove,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index b84922a4b32e..eea0c79111e7 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -852,7 +852,6 @@ MODULE_DEVICE_TABLE(spi, ad7793_id);
static struct spi_driver ad7793_driver = {
.driver = {
.name = "ad7793",
- .owner = THIS_MODULE,
},
.probe = ad7793_probe,
.remove = ad7793_remove,
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 2fd012ee99f5..2d3c397e66ad 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -356,7 +356,6 @@ MODULE_DEVICE_TABLE(spi, ad7887_id);
static struct spi_driver ad7887_driver = {
.driver = {
.name = "ad7887",
- .owner = THIS_MODULE,
},
.probe = ad7887_probe,
.remove = ad7887_remove,
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 28732c28e819..45e29ccd824f 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -357,7 +357,6 @@ MODULE_DEVICE_TABLE(spi, ad7923_id);
static struct spi_driver ad7923_driver = {
.driver = {
.name = "ad7923",
- .owner = THIS_MODULE,
},
.probe = ad7923_probe,
.remove = ad7923_remove,
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 54a8302aaace..41d495c6035e 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -509,7 +509,6 @@ static struct spi_driver max1027_driver = {
.driver = {
.name = "max1027",
.of_match_table = of_match_ptr(max1027_adc_dt_ids),
- .owner = THIS_MODULE,
},
.probe = max1027_probe,
.remove = max1027_remove,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 41a21e986c1a..8569c8e1f4b2 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -405,7 +405,6 @@ static struct spi_driver mcp320x_driver = {
.driver = {
.name = "mcp320x",
.of_match_table = of_match_ptr(mcp320x_dt_ids),
- .owner = THIS_MODULE,
},
.probe = mcp320x_probe,
.remove = mcp320x_remove,
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 98c0d2b444bf..ff6f7f63c8d9 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -192,7 +192,6 @@ static struct spi_driver adc128_driver = {
.driver = {
.name = "adc128s052",
.of_match_table = of_match_ptr(adc128_of_match),
- .owner = THIS_MODULE,
},
.probe = adc128_probe,
.remove = adc128_remove,
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 32b82a2dc894..102c7174da5b 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -200,7 +200,6 @@ MODULE_DEVICE_TABLE(spi, ad8366_id);
static struct spi_driver ad8366_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = ad8366_probe,
.remove = ad8366_remove,
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index d338bb595db3..ea7adb638d99 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -700,7 +700,6 @@ static struct spi_driver ssp_driver = {
.remove = ssp_remove,
.driver = {
.pm = &ssp_pm_ops,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ssp_of_match),
.name = "sensorhub"
},
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index c067e6821496..9e4d2c18b554 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -568,7 +568,6 @@ MODULE_DEVICE_TABLE(spi, ad5064_spi_ids);
static struct spi_driver ad5064_spi_driver = {
.driver = {
.name = "ad5064",
- .owner = THIS_MODULE,
},
.probe = ad5064_spi_probe,
.remove = ad5064_spi_remove,
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 64634d7f578e..8ba0e9c50176 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -549,7 +549,6 @@ MODULE_DEVICE_TABLE(spi, ad5360_ids);
static struct spi_driver ad5360_driver = {
.driver = {
.name = "ad5360",
- .owner = THIS_MODULE,
},
.probe = ad5360_probe,
.remove = ad5360_remove,
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 130de9b3e0bf..97d2c5111f43 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -519,7 +519,6 @@ MODULE_DEVICE_TABLE(spi, ad5380_spi_ids);
static struct spi_driver ad5380_spi_driver = {
.driver = {
.name = "ad5380",
- .owner = THIS_MODULE,
},
.probe = ad5380_spi_probe,
.remove = ad5380_spi_remove,
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 787ef1d859c6..968712be967f 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -524,7 +524,6 @@ static int ad5421_probe(struct spi_device *spi)
static struct spi_driver ad5421_driver = {
.driver = {
.name = "ad5421",
- .owner = THIS_MODULE,
},
.probe = ad5421_probe,
};
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 07e17d72a3f3..b555552a0d80 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -481,7 +481,6 @@ static int ad5446_spi_remove(struct spi_device *spi)
static struct spi_driver ad5446_spi_driver = {
.driver = {
.name = "ad5446",
- .owner = THIS_MODULE,
},
.probe = ad5446_spi_probe,
.remove = ad5446_spi_remove,
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 64d7256cbb6d..5f3202339420 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -356,7 +356,6 @@ MODULE_DEVICE_TABLE(spi, ad5449_spi_ids);
static struct spi_driver ad5449_spi_driver = {
.driver = {
.name = "ad5449",
- .owner = THIS_MODULE,
},
.probe = ad5449_spi_probe,
.remove = ad5449_spi_remove,
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index e6e9756580af..4e4c20d6d8b5 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -363,7 +363,6 @@ MODULE_DEVICE_TABLE(spi, ad5504_id);
static struct spi_driver ad5504_driver = {
.driver = {
.name = "ad5504",
- .owner = THIS_MODULE,
},
.probe = ad5504_probe,
.remove = ad5504_remove,
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index e98428df0d44..5489ec43b95d 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -306,7 +306,6 @@ MODULE_DEVICE_TABLE(spi, ad5624r_id);
static struct spi_driver ad5624r_driver = {
.driver = {
.name = "ad5624r",
- .owner = THIS_MODULE,
},
.probe = ad5624r_probe,
.remove = ad5624r_remove,
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 15c73e20272d..d1d8450c19f6 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -395,7 +395,6 @@ MODULE_DEVICE_TABLE(spi, ad5686_id);
static struct spi_driver ad5686_driver = {
.driver = {
.name = "ad5686",
- .owner = THIS_MODULE,
},
.probe = ad5686_probe,
.remove = ad5686_remove,
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index a7c851f62d7c..bfb350a85a16 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -610,7 +610,6 @@ MODULE_DEVICE_TABLE(spi, ad5755_id);
static struct spi_driver ad5755_driver = {
.driver = {
.name = "ad5755",
- .owner = THIS_MODULE,
},
.probe = ad5755_probe,
.id_table = ad5755_id,
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index d0d38165339d..9a547bbf7d2b 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -357,7 +357,6 @@ MODULE_DEVICE_TABLE(spi, ad5764_ids);
static struct spi_driver ad5764_driver = {
.driver = {
.name = "ad5764",
- .owner = THIS_MODULE,
},
.probe = ad5764_probe,
.remove = ad5764_remove,
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 5ba785f18589..33e4ae5c42f8 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -461,7 +461,6 @@ MODULE_DEVICE_TABLE(spi, ad5791_id);
static struct spi_driver ad5791_driver = {
.driver = {
.name = "ad5791",
- .owner = THIS_MODULE,
},
.probe = ad5791_probe,
.remove = ad5791_remove,
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 18a4ad5ff8c5..e690dd11e99f 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -297,7 +297,6 @@ static struct spi_driver ad7303_driver = {
.driver = {
.name = "ad7303",
.of_match_table = of_match_ptr(ad7303_spi_of_match),
- .owner = THIS_MODULE,
},
.probe = ad7303_probe,
.remove = ad7303_remove,
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 92cf4ca6981d..3854d201a5d6 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -203,7 +203,6 @@ MODULE_DEVICE_TABLE(spi, mcp4922_id);
static struct spi_driver mcp4922_driver = {
.driver = {
.name = "mcp4922",
- .owner = THIS_MODULE,
},
.probe = mcp4922_probe,
.remove = mcp4922_remove,
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 50ed8d1ca45a..44a30f286de1 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -1027,7 +1027,6 @@ MODULE_DEVICE_TABLE(spi, ad9523_id);
static struct spi_driver ad9523_driver = {
.driver = {
.name = "ad9523",
- .owner = THIS_MODULE,
},
.probe = ad9523_probe,
.remove = ad9523_remove,
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 73f27e0a08dd..d2d824b446f5 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -634,7 +634,6 @@ static struct spi_driver adf4350_driver = {
.driver = {
.name = "adf4350",
.of_match_table = of_match_ptr(adf4350_of_match),
- .owner = THIS_MODULE,
},
.probe = adf4350_probe,
.remove = adf4350_remove,
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index add509837269..ad31a1372a04 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -228,7 +228,6 @@ MODULE_DEVICE_TABLE(spi, adis16080_ids);
static struct spi_driver adis16080_driver = {
.driver = {
.name = "adis16080",
- .owner = THIS_MODULE,
},
.probe = adis16080_probe,
.remove = adis16080_remove,
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 8d08c7ed1ea6..e5241f41e65e 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -167,7 +167,6 @@ static int adis16130_probe(struct spi_device *spi)
static struct spi_driver adis16130_driver = {
.driver = {
.name = "adis16130",
- .owner = THIS_MODULE,
},
.probe = adis16130_probe,
};
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 26de876b223d..f8d1c2210066 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -570,7 +570,6 @@ MODULE_DEVICE_TABLE(spi, adis16136_ids);
static struct spi_driver adis16136_driver = {
.driver = {
.name = "adis16136",
- .owner = THIS_MODULE,
},
.id_table = adis16136_ids,
.probe = adis16136_probe,
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 00c6ad9bf35f..7da8825f4791 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -435,7 +435,6 @@ MODULE_DEVICE_TABLE(spi, adis16260_id);
static struct spi_driver adis16260_driver = {
.driver = {
.name = "adis16260",
- .owner = THIS_MODULE,
},
.probe = adis16260_probe,
.remove = adis16260_remove,
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index eb0e08ec9e20..a330d4288bb0 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -456,7 +456,6 @@ MODULE_DEVICE_TABLE(spi, adxrs450_id);
static struct spi_driver adxrs450_driver = {
.driver = {
.name = "adxrs450",
- .owner = THIS_MODULE,
},
.probe = adxrs450_probe,
.id_table = adxrs450_id,
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index e59bead6bc3c..d2b7a5fa344c 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -60,7 +60,6 @@ MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
static struct spi_driver st_gyro_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-gyro-spi",
},
.probe = st_gyro_spi_probe,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index abc4c50de9e8..0618f831ecd4 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -986,7 +986,6 @@ MODULE_DEVICE_TABLE(spi, adis16400_id);
static struct spi_driver adis16400_driver = {
.driver = {
.name = "adis16400",
- .owner = THIS_MODULE,
},
.id_table = adis16400_id,
.probe = adis16400_probe,
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index b94bfd3f595b..2485b88ee1b6 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -896,7 +896,6 @@ MODULE_DEVICE_TABLE(spi, adis16480_ids);
static struct spi_driver adis16480_driver = {
.driver = {
.name = "adis16480",
- .owner = THIS_MODULE,
},
.id_table = adis16480_ids,
.probe = adis16480_probe,
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 0abca2c6afa6..6325e7dc8e03 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE(spi, st_magn_id_table);
static struct spi_driver st_magn_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-magn-spi",
},
.probe = st_magn_spi_probe,
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 08ee6e88c79f..aaa0c4ba91a7 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -117,7 +117,6 @@ MODULE_DEVICE_TABLE(spi, ms5611_id);
static struct spi_driver ms5611_driver = {
.driver = {
.name = "ms5611",
- .owner = THIS_MODULE,
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 1ffa6d4d349c..40c0692ff1de 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -56,7 +56,6 @@ MODULE_DEVICE_TABLE(spi, st_press_id_table);
static struct spi_driver st_press_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-press-spi",
},
.probe = st_press_spi_probe,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index e95035136889..f4d29d5dbd5f 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -450,7 +450,6 @@ static struct spi_driver as3935_driver = {
.driver = {
.name = "as3935",
.of_match_table = of_match_ptr(as3935_of_match),
- .owner = THIS_MODULE,
.pm = AS3935_PM_OPS,
},
.probe = as3935_probe,
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index a79e50b58bf5..fea315e76bc8 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -113,7 +113,6 @@ static int ad714x_spi_remove(struct spi_device *spi)
static struct spi_driver ad714x_spi_driver = {
.driver = {
.name = "ad714x_captouch",
- .owner = THIS_MODULE,
.pm = &ad714x_spi_pm,
},
.probe = ad714x_spi_probe,
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index da6e76b58dab..3ec03ad88eed 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -120,7 +120,6 @@ static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
static struct spi_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
- .owner = THIS_MODULE,
.pm = &adxl34x_spi_pm,
},
.probe = adxl34x_spi_probe,
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index da4e5bb5e045..9c250ae780d9 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -843,7 +843,6 @@ static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
static struct spi_driver ad7877_driver = {
.driver = {
.name = "ad7877",
- .owner = THIS_MODULE,
.pm = &ad7877_pm,
},
.probe = ad7877_probe,
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 1a7b1143536e..48033c2689ab 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -149,7 +149,6 @@ static int ad7879_spi_remove(struct spi_device *spi)
static struct spi_driver ad7879_spi_driver = {
.driver = {
.name = "ad7879",
- .owner = THIS_MODULE,
.pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 04edc8f7122f..e431cf63a85d 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1500,7 +1500,6 @@ static int ads7846_remove(struct spi_device *spi)
static struct spi_driver ads7846_driver = {
.driver = {
.name = "ads7846",
- .owner = THIS_MODULE,
.pm = &ads7846_pm,
.of_match_table = of_match_ptr(ads7846_dt_ids),
},
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
index b19434cebbf6..ec5f7c74f048 100644
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ b/drivers/input/touchscreen/cyttsp4_spi.c
@@ -185,7 +185,6 @@ static int cyttsp4_spi_remove(struct spi_device *spi)
static struct spi_driver cyttsp4_spi_driver = {
.driver = {
.name = CYTTSP4_SPI_NAME,
- .owner = THIS_MODULE,
.pm = &cyttsp4_pm_ops,
},
.probe = cyttsp4_spi_probe,
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 4728bcb1916c..bbeeb2488b57 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -182,7 +182,6 @@ static int cyttsp_spi_remove(struct spi_device *spi)
static struct spi_driver cyttsp_spi_driver = {
.driver = {
.name = CY_SPI_NAME,
- .owner = THIS_MODULE,
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_spi_probe,
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 3f117637e832..d214f22ed305 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -38,6 +38,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
/* read 512 bytes from endpoint 0x86 -> get header + blobs */
@@ -163,7 +164,7 @@ struct sur40_state {
};
struct sur40_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -420,7 +421,7 @@ static void sur40_process_video(struct sur40_state *sur40)
dev_dbg(sur40->dev, "header acquired\n");
- sgt = vb2_dma_sg_plane_desc(&new_buf->vb, 0);
+ sgt = vb2_dma_sg_plane_desc(&new_buf->vb.vb2_buf, 0);
result = usb_sg_init(&sgr, sur40->usbdev,
usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0,
@@ -443,15 +444,15 @@ static void sur40_process_video(struct sur40_state *sur40)
goto err_poll;
/* mark as finished */
- v4l2_get_timestamp(&new_buf->vb.v4l2_buf.timestamp);
- new_buf->vb.v4l2_buf.sequence = sur40->sequence++;
- new_buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
- vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&new_buf->vb.timestamp);
+ new_buf->vb.sequence = sur40->sequence++;
+ new_buf->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dev_dbg(sur40->dev, "buffer marked done\n");
return;
err_poll:
- vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
/* Initialize input device parameters. */
@@ -643,10 +644,11 @@ static void sur40_disconnect(struct usb_interface *interface)
* minimum number: many DMA engines need a minimum of 2 buffers in the
* queue and you need to have another available for userspace processing.
*/
-static int sur40_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int sur40_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct sur40_state *sur40 = vb2_get_drv_priv(q);
if (q->num_buffers + *nbuffers < 3)
@@ -701,7 +703,7 @@ static void return_all_buffers(struct sur40_state *sur40,
spin_lock(&sur40->qlock);
list_for_each_entry_safe(buf, node, &sur40->buf_list, list) {
- vb2_buffer_done(&buf->vb, state);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
list_del(&buf->list);
}
spin_unlock(&sur40->qlock);
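The sur40 changes above follow the common videobuf2 conversion pattern: the driver buffer embeds struct vb2_v4l2_buffer instead of struct vb2_buffer, and vb2 callbacks walk back from the vb2_buffer they receive to the driver type. A minimal sketch of that pattern, with hypothetical my_buffer/to_my_buffer names (sur40 itself only needs the embedded member, as shown above):

#include <linux/list.h>
#include <media/videobuf2-v4l2.h>

struct my_buffer {
	struct vb2_v4l2_buffer vb;	/* must be first: vb2 core allocates the
					 * buffer and treats its start as the
					 * vb2/v4l2 buffer */
	struct list_head list;
};

static inline struct my_buffer *to_my_buffer(struct vb2_buffer *vb2)
{
	/* callbacks get a struct vb2_buffer *; recover the driver buffer */
	return container_of(to_vb2_v4l2_buffer(vb2), struct my_buffer, vb);
}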
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 0f65d02eeb26..f41f23318484 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -752,7 +752,6 @@ static SIMPLE_DEV_PM_OPS(tsc2005_pm_ops, tsc2005_suspend, tsc2005_resume);
static struct spi_driver tsc2005_driver = {
.driver = {
.name = "tsc2005",
- .owner = THIS_MODULE,
.pm = &tsc2005_pm_ops,
},
.probe = tsc2005_probe,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cbe6a890a93a..b9094e9da537 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -48,6 +48,13 @@ config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
+# IOMMU-agnostic DMA-mapping layer
+config IOMMU_DMA
+ bool
+ depends on NEED_SG_DMA_LENGTH
+ select IOMMU_API
+ select IOMMU_IOVA
+
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PPC32
@@ -134,6 +141,16 @@ config INTEL_IOMMU
and include PCI device scope covered by these DMA
remapping devices.
+config INTEL_IOMMU_SVM
+ bool "Support for Shared Virtual Memory with Intel IOMMU"
+ depends on INTEL_IOMMU && X86
+ select PCI_PASID
+ select MMU_NOTIFIER
+ help
+ Shared Virtual Memory (SVM) provides a facility for devices
+ to access DMA resources through process address space by
+ means of a Process Address Space ID (PASID).
+
config INTEL_IOMMU_DEFAULT_ON
def_bool y
prompt "Enable Intel DMA Remapping Devices by default"
@@ -361,6 +378,7 @@ config ARM_SMMU_V3
depends on ARM64 && PCI
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
+ select GENERIC_MSI_IRQ_DOMAIN
help
Support for implementations of the ARM System MMU architecture
version 3 providing translation support to a PCIe root complex.
@@ -368,4 +386,11 @@ config ARM_SMMU_V3
Say Y here if your system includes an IOMMU device implementing
the ARM SMMUv3 architecture.
+config S390_IOMMU
+ def_bool y if S390 && PCI
+ depends on S390 && PCI
+ select IOMMU_API
+ help
+ Support for the IOMMU API for s390 PCI devices.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6dcc513d711..68faca02225d 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
+obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
@@ -23,3 +25,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
+obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 532e2a211fe1..0d533bba4ad1 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -89,8 +89,6 @@ static struct dma_map_ops amd_iommu_dma_ops;
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct list_head dev_data_list; /* For global dev_data_list */
- struct list_head alias_list; /* Link alias-groups together */
- struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
@@ -136,8 +134,6 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
if (!dev_data)
return NULL;
- INIT_LIST_HEAD(&dev_data->alias_list);
-
dev_data->devid = devid;
spin_lock_irqsave(&dev_data_list_lock, flags);
@@ -147,17 +143,6 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
return dev_data;
}
-static void free_dev_data(struct iommu_dev_data *dev_data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_del(&dev_data->dev_data_list);
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
- kfree(dev_data);
-}
-
static struct iommu_dev_data *search_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -311,73 +296,10 @@ out:
iommu_group_put(group);
}
-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- *(u16 *)data = alias;
- return 0;
-}
-
-static u16 get_alias(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid, ivrs_alias, pci_alias;
-
- devid = get_device_id(dev);
- ivrs_alias = amd_iommu_alias_table[devid];
- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
-
- if (ivrs_alias == pci_alias)
- return ivrs_alias;
-
- /*
- * DMA alias showdown
- *
- * The IVRS is fairly reliable in telling us about aliases, but it
- * can't know about every screwy device. If we don't have an IVRS
- * reported alias, use the PCI reported alias. In that case we may
- * still need to initialize the rlookup and dev_table entries if the
- * alias is to a non-existent device.
- */
- if (ivrs_alias == devid) {
- if (!amd_iommu_rlookup_table[pci_alias]) {
- amd_iommu_rlookup_table[pci_alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[pci_alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[pci_alias].data));
- }
-
- return pci_alias;
- }
-
- pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
- "for device %s[%04x:%04x], kernel reported alias "
- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
- PCI_FUNC(pci_alias));
-
- /*
- * If we don't have a PCI DMA alias and the IVRS alias is on the same
- * bus, then the IVRS table may know about a quirk that we don't.
- */
- if (pci_alias == devid &&
- PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- pdev->dma_alias_devfn = ivrs_alias & 0xff;
- pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
- PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
- dev_name(dev));
- }
-
- return ivrs_alias;
-}
-
static int iommu_init_device(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
- u16 alias;
if (dev->archdata.iommu)
return 0;
@@ -386,24 +308,6 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
- alias = get_alias(dev);
-
- if (alias != dev_data->devid) {
- struct iommu_dev_data *alias_data;
-
- alias_data = find_dev_data(alias);
- if (alias_data == NULL) {
- pr_err("AMD-Vi: Warning: Unhandled device %s\n",
- dev_name(dev));
- free_dev_data(dev_data);
- return -ENOTSUPP;
- }
- dev_data->alias_data = alias_data;
-
- /* Add device to the alias_list */
- list_add(&dev_data->alias_list, &alias_data->alias_list);
- }
-
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -445,9 +349,6 @@ static void iommu_uninit_device(struct device *dev)
iommu_group_remove_device(dev);
- /* Unlink from alias, it may change if another device is re-plugged */
- dev_data->alias_data = NULL;
-
/* Remove dma-ops */
dev->archdata.dma_ops = NULL;
@@ -633,7 +534,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
while (head != tail) {
iommu_print_event(iommu, iommu->evt_buf + head);
- head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
+ head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
}
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
@@ -783,7 +684,7 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
u8 *target;
target = iommu->cmd_buf + tail;
- tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
+ tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
/* Copy command to buffer */
memcpy(target, cmd, sizeof(*cmd));
@@ -950,15 +851,13 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
u32 left, tail, head, next_tail;
unsigned long flags;
- WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
-
again:
spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
- next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
- left = (head - next_tail) % iommu->cmd_buf_size;
+ next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+ left = (head - next_tail) % CMD_BUFFER_SIZE;
if (left <= 2) {
struct iommu_cmd sync_cmd;
@@ -1114,11 +1013,15 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
struct amd_iommu *iommu;
+ u16 alias;
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
+ alias = amd_iommu_alias_table[dev_data->devid];
ret = iommu_flush_dte(iommu, dev_data->devid);
+ if (!ret && alias != dev_data->devid)
+ ret = iommu_flush_dte(iommu, alias);
if (ret)
return ret;
@@ -1984,27 +1887,33 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu;
+ u16 alias;
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
+ alias = amd_iommu_alias_table[dev_data->devid];
ats = dev_data->ats.enabled;
/* Update data structures */
dev_data->domain = domain;
list_add(&dev_data->list, &domain->dev_list);
- set_dte_entry(dev_data->devid, domain, ats);
/* Do reference counting */
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
- /* Flush the DTE entry */
+ /* Update device table */
+ set_dte_entry(dev_data->devid, domain, ats);
+ if (alias != dev_data->devid)
+ set_dte_entry(dev_data->devid, domain, ats);
+
device_flush_dte(dev_data);
}
static void do_detach(struct iommu_dev_data *dev_data)
{
struct amd_iommu *iommu;
+ u16 alias;
/*
* First check if the device is still attached. It might already
@@ -2016,6 +1925,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
return;
iommu = amd_iommu_rlookup_table[dev_data->devid];
+ alias = amd_iommu_alias_table[dev_data->devid];
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -2025,6 +1935,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
dev_data->domain = NULL;
list_del(&dev_data->list);
clear_dte_entry(dev_data->devid);
+ if (alias != dev_data->devid)
+ clear_dte_entry(alias);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -2037,29 +1949,23 @@ static void do_detach(struct iommu_dev_data *dev_data)
static int __attach_device(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
- struct iommu_dev_data *head, *entry;
int ret;
+ /*
+ * Must be called with IRQs disabled. Warn here to detect early
+ * when it's not.
+ */
+ WARN_ON(!irqs_disabled());
+
/* lock domain */
spin_lock(&domain->lock);
- head = dev_data;
-
- if (head->alias_data != NULL)
- head = head->alias_data;
-
- /* Now we have the root of the alias group, if any */
-
ret = -EBUSY;
- if (head->domain != NULL)
+ if (dev_data->domain != NULL)
goto out_unlock;
/* Attach alias group root */
- do_attach(head, domain);
-
- /* Attach other devices in the alias group */
- list_for_each_entry(entry, &head->alias_list, alias_list)
- do_attach(entry, domain);
+ do_attach(dev_data, domain);
ret = 0;
@@ -2209,26 +2115,24 @@ static int attach_device(struct device *dev,
*/
static void __detach_device(struct iommu_dev_data *dev_data)
{
- struct iommu_dev_data *head, *entry;
struct protection_domain *domain;
- unsigned long flags;
- BUG_ON(!dev_data->domain);
-
- domain = dev_data->domain;
+ /*
+ * Must be called with IRQs disabled. Warn here to detect early
+ * when it's not.
+ */
+ WARN_ON(!irqs_disabled());
- spin_lock_irqsave(&domain->lock, flags);
+ if (WARN_ON(!dev_data->domain))
+ return;
- head = dev_data;
- if (head->alias_data != NULL)
- head = head->alias_data;
+ domain = dev_data->domain;
- list_for_each_entry(entry, &head->alias_list, alias_list)
- do_detach(entry);
+ spin_lock(&domain->lock);
- do_detach(head);
+ do_detach(dev_data);
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
}
/*
@@ -3198,6 +3102,7 @@ static const struct iommu_ops amd_iommu_ops = {
.iova_to_phys = amd_iommu_iova_to_phys,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
+ .device_group = pci_device_group,
.get_dm_regions = amd_iommu_get_dm_regions,
.put_dm_regions = amd_iommu_put_dm_regions,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 9f86ecff38aa..013bdfff2d4d 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -408,20 +408,6 @@ static inline int ivhd_entry_length(u8 *ivhd)
}
/*
- * This function reads the last device id the IOMMU has to handle from the PCI
- * capability header for this IOMMU
- */
-static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
-{
- u32 cap;
-
- cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
- update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
-
- return 0;
-}
-
-/*
* After reading the highest device id from the IOMMU PCI capability header
* this function looks if there is a higher device id defined in the ACPI table
*/
@@ -433,14 +419,13 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
p += sizeof(*h);
end += h->length;
- find_last_devid_on_pci(PCI_BUS_NUM(h->devid),
- PCI_SLOT(h->devid),
- PCI_FUNC(h->devid),
- h->cap_ptr);
-
while (p < end) {
dev = (struct ivhd_entry *)p;
switch (dev->type) {
+ case IVHD_DEV_ALL:
+ /* Use maximum BDF value for DEV_ALL */
+ update_last_devid(0xffff);
+ break;
case IVHD_DEV_SELECT:
case IVHD_DEV_RANGE_END:
case IVHD_DEV_ALIAS:
@@ -513,17 +498,12 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
* write commands to that buffer later and the IOMMU will execute them
* asynchronously
*/
-static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
+static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(CMD_BUFFER_SIZE));
-
- if (cmd_buf == NULL)
- return NULL;
-
- iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
+ iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(CMD_BUFFER_SIZE));
- return cmd_buf;
+ return iommu->cmd_buf ? 0 : -ENOMEM;
}
/*
@@ -557,27 +537,20 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
&entry, sizeof(entry));
amd_iommu_reset_cmd_buffer(iommu);
- iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- free_pages((unsigned long)iommu->cmd_buf,
- get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
+ free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log its events to */
-static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
+static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
- iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(EVT_BUFFER_SIZE));
+ iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(EVT_BUFFER_SIZE));
- if (iommu->evt_buf == NULL)
- return NULL;
-
- iommu->evt_buf_size = EVT_BUFFER_SIZE;
-
- return iommu->evt_buf;
+ return iommu->evt_buf ? 0 : -ENOMEM;
}
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
@@ -604,15 +577,12 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
}
/* allocates the memory where the IOMMU will log PPR requests to */
-static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
- iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(PPR_LOG_SIZE));
-
- if (iommu->ppr_log == NULL)
- return NULL;
+ iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(PPR_LOG_SIZE));
- return iommu->ppr_log;
+ return iommu->ppr_log ? 0 : -ENOMEM;
}
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
@@ -835,20 +805,10 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
switch (e->type) {
case IVHD_DEV_ALL:
- DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
- " last device %02x:%02x.%x flags: %02x\n",
- PCI_BUS_NUM(iommu->first_device),
- PCI_SLOT(iommu->first_device),
- PCI_FUNC(iommu->first_device),
- PCI_BUS_NUM(iommu->last_device),
- PCI_SLOT(iommu->last_device),
- PCI_FUNC(iommu->last_device),
- e->flags);
+ DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
- for (dev_i = iommu->first_device;
- dev_i <= iommu->last_device; ++dev_i)
- set_dev_entry_from_acpi(iommu, dev_i,
- e->flags, 0);
+ for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+ set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
break;
case IVHD_DEV_SELECT:
@@ -1004,17 +964,6 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
return 0;
}
-/* Initializes the device->iommu mapping for the driver */
-static int __init init_iommu_devices(struct amd_iommu *iommu)
-{
- u32 i;
-
- for (i = iommu->first_device; i <= iommu->last_device; ++i)
- set_iommu_for_device(iommu, i);
-
- return 0;
-}
-
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_command_buffer(iommu);
@@ -1111,12 +1060,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
- iommu->cmd_buf = alloc_command_buffer(iommu);
- if (!iommu->cmd_buf)
+ if (alloc_command_buffer(iommu))
return -ENOMEM;
- iommu->evt_buf = alloc_event_buffer(iommu);
- if (!iommu->evt_buf)
+ if (alloc_event_buffer(iommu))
return -ENOMEM;
iommu->int_enabled = false;
@@ -1135,8 +1082,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
*/
amd_iommu_rlookup_table[iommu->devid] = NULL;
- init_iommu_devices(iommu);
-
return 0;
}
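/*
 * Calling-convention note (illustrative, not part of the patch): because the
 * command/event/PPR buffers now have fixed sizes (CMD_BUFFER_SIZE etc.), the
 * allocators store the pointer in struct amd_iommu themselves and just return
 * 0 or -ENOMEM, so every caller collapses to the pattern seen above:
 *
 *	if (alloc_command_buffer(iommu))
 *		return -ENOMEM;
 */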
@@ -1266,11 +1211,6 @@ static int iommu_init_pci(struct amd_iommu *iommu)
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
&misc);
- iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
- MMIO_GET_FD(range));
- iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
- MMIO_GET_LD(range));
-
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
@@ -1308,11 +1248,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_v2_present = true;
}
- if (iommu_feature(iommu, FEATURE_PPR)) {
- iommu->ppr_log = alloc_ppr_log(iommu);
- if (!iommu->ppr_log)
- return -ENOMEM;
- }
+ if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+ return -ENOMEM;
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
amd_iommu_np_cache = true;
@@ -1758,11 +1695,8 @@ static void __init free_on_init_error(void)
free_pages((unsigned long)irq_lookup_table,
get_order(rlookup_table_size));
- if (amd_iommu_irq_cache) {
- kmem_cache_destroy(amd_iommu_irq_cache);
- amd_iommu_irq_cache = NULL;
-
- }
+ kmem_cache_destroy(amd_iommu_irq_cache);
+ amd_iommu_irq_cache = NULL;
free_pages((unsigned long)amd_iommu_rlookup_table,
get_order(rlookup_table_size));
@@ -2201,7 +2135,7 @@ int __init amd_iommu_detect(void)
iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
- return 0;
+ return 1;
}
/****************************************************************************
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 6a0bf1ad5235..b08cf57bf455 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -295,9 +295,9 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
+#define DTE_FLAG_IOTLB (1ULL << 32)
+#define DTE_FLAG_GV (1ULL << 55)
#define DTE_FLAG_MASK (0x3ffULL << 32)
-#define DTE_FLAG_IOTLB (0x01UL << 32)
-#define DTE_FLAG_GV (0x01ULL << 55)
#define DTE_GLX_SHIFT (56)
#define DTE_GLX_MASK (3)
@@ -517,11 +517,6 @@ struct amd_iommu {
/* pci domain of this IOMMU */
u16 pci_seg;
- /* first device this IOMMU handles. read from PCI */
- u16 first_device;
- /* last device this IOMMU handles. read from PCI */
- u16 last_device;
-
/* start of exclusion range of that IOMMU */
u64 exclusion_start;
/* length of exclusion range of that IOMMU */
@@ -529,11 +524,7 @@ struct amd_iommu {
/* command buffer virtual address */
u8 *cmd_buf;
- /* size of command buffer */
- u32 cmd_buf_size;
- /* size of event buffer */
- u32 evt_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 286e890e7d64..4e5118a4cd30 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -26,8 +26,10 @@
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -403,6 +405,31 @@ enum pri_resp {
PRI_RESP_SUCC,
};
+enum arm_smmu_msi_index {
+ EVTQ_MSI_INDEX,
+ GERROR_MSI_INDEX,
+ PRIQ_MSI_INDEX,
+ ARM_SMMU_MAX_MSIS,
+};
+
+static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
+ [EVTQ_MSI_INDEX] = {
+ ARM_SMMU_EVTQ_IRQ_CFG0,
+ ARM_SMMU_EVTQ_IRQ_CFG1,
+ ARM_SMMU_EVTQ_IRQ_CFG2,
+ },
+ [GERROR_MSI_INDEX] = {
+ ARM_SMMU_GERROR_IRQ_CFG0,
+ ARM_SMMU_GERROR_IRQ_CFG1,
+ ARM_SMMU_GERROR_IRQ_CFG2,
+ },
+ [PRIQ_MSI_INDEX] = {
+ ARM_SMMU_PRIQ_IRQ_CFG0,
+ ARM_SMMU_PRIQ_IRQ_CFG1,
+ ARM_SMMU_PRIQ_IRQ_CFG2,
+ },
+};
+
struct arm_smmu_cmdq_ent {
/* Common fields */
u8 opcode;
@@ -570,7 +597,6 @@ struct arm_smmu_device {
unsigned int sid_bits;
struct arm_smmu_strtab_cfg strtab_cfg;
- struct list_head list;
};
/* SMMU private data for an IOMMU group */
@@ -605,10 +631,6 @@ struct arm_smmu_domain {
struct iommu_domain domain;
};
-/* Our list of SMMU instances */
-static DEFINE_SPINLOCK(arm_smmu_devices_lock);
-static LIST_HEAD(arm_smmu_devices);
-
struct arm_smmu_option_prop {
u32 opt;
const char *prop;
@@ -1427,7 +1449,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
- u16 asid;
+ int asid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
@@ -1439,10 +1461,11 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
&cfg->cdptr_dma, GFP_KERNEL);
if (!cfg->cdptr) {
dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+ ret = -ENOMEM;
goto out_free_asid;
}
- cfg->cd.asid = asid;
+ cfg->cd.asid = (u16)asid;
cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
@@ -1456,7 +1479,7 @@ out_free_asid:
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
- u16 vmid;
+ int vmid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -1464,7 +1487,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
if (IS_ERR_VALUE(vmid))
return vmid;
- cfg->vmid = vmid;
+ cfg->vmid = (u16)vmid;
cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
return 0;
@@ -1726,7 +1749,8 @@ static void __arm_smmu_release_pci_iommudata(void *data)
static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
struct device_node *of_node;
- struct arm_smmu_device *curr, *smmu = NULL;
+ struct platform_device *smmu_pdev;
+ struct arm_smmu_device *smmu = NULL;
struct pci_bus *bus = pdev->bus;
/* Walk up to the root bus */
@@ -1739,14 +1763,10 @@ static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
return NULL;
/* See if we can find an SMMU corresponding to the phandle */
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(curr, &arm_smmu_devices, list) {
- if (curr->dev->of_node == of_node) {
- smmu = curr;
- break;
- }
- }
- spin_unlock(&arm_smmu_devices_lock);
+ smmu_pdev = of_find_device_by_node(of_node);
+ if (smmu_pdev)
+ smmu = platform_get_drvdata(smmu_pdev);
+
of_node_put(of_node);
return smmu;
}
@@ -1902,6 +1922,7 @@ static struct iommu_ops arm_smmu_ops = {
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
+ .device_group = pci_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
@@ -2186,6 +2207,72 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
1, ARM_SMMU_POLL_TIMEOUT_US);
}
+static void arm_smmu_free_msis(void *data)
+{
+ struct device *dev = data;
+ platform_msi_domain_free_irqs(dev);
+}
+
+static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ phys_addr_t doorbell;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+ phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
+
+ doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;
+
+ writeq_relaxed(doorbell, smmu->base + cfg[0]);
+ writel_relaxed(msg->data, smmu->base + cfg[1]);
+ writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
+}
+
+static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
+{
+ struct msi_desc *desc;
+ int ret, nvec = ARM_SMMU_MAX_MSIS;
+ struct device *dev = smmu->dev;
+
+ /* Clear the MSI address regs */
+ writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
+ writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
+
+ if (smmu->features & ARM_SMMU_FEAT_PRI)
+ writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
+ else
+ nvec--;
+
+ if (!(smmu->features & ARM_SMMU_FEAT_MSI))
+ return;
+
+ /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
+ ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs\n");
+ return;
+ }
+
+ for_each_msi_entry(desc, dev) {
+ switch (desc->platform.msi_index) {
+ case EVTQ_MSI_INDEX:
+ smmu->evtq.q.irq = desc->irq;
+ break;
+ case GERROR_MSI_INDEX:
+ smmu->gerr_irq = desc->irq;
+ break;
+ case PRIQ_MSI_INDEX:
+ smmu->priq.q.irq = desc->irq;
+ break;
+ default: /* Unknown */
+ continue;
+ }
+ }
+
+ /* Add callback to free MSIs on teardown */
+ devm_add_action(dev, arm_smmu_free_msis, dev);
+}
+
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
int ret, irq;
@@ -2199,11 +2286,9 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
return ret;
}
- /* Clear the MSI address regs */
- writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
- writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
+ arm_smmu_setup_msis(smmu);
- /* Request wired interrupt lines */
+ /* Request interrupt lines */
irq = smmu->evtq.q.irq;
if (irq) {
ret = devm_request_threaded_irq(smmu->dev, irq,
@@ -2232,8 +2317,6 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
}
if (smmu->features & ARM_SMMU_FEAT_PRI) {
- writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
-
irq = smmu->priq.q.irq;
if (irq) {
ret = devm_request_threaded_irq(smmu->dev, irq,
@@ -2612,16 +2695,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Record our private device structure */
+ platform_set_drvdata(pdev, smmu);
+
/* Reset the device */
ret = arm_smmu_device_reset(smmu);
if (ret)
goto out_free_structures;
- /* Record our private device structure */
- INIT_LIST_HEAD(&smmu->list);
- spin_lock(&arm_smmu_devices_lock);
- list_add(&smmu->list, &arm_smmu_devices);
- spin_unlock(&arm_smmu_devices_lock);
return 0;
out_free_structures:
@@ -2631,21 +2712,7 @@ out_free_structures:
static int arm_smmu_device_remove(struct platform_device *pdev)
{
- struct arm_smmu_device *curr, *smmu = NULL;
- struct device *dev = &pdev->dev;
-
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(curr, &arm_smmu_devices, list) {
- if (curr->dev == dev) {
- smmu = curr;
- list_del(&smmu->list);
- break;
- }
- }
- spin_unlock(&arm_smmu_devices_lock);
-
- if (!smmu)
- return -ENODEV;
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
arm_smmu_device_disable(smmu);
arm_smmu_free_structures(smmu);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 48a39dfa9777..47dc7a793f5c 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -70,6 +70,18 @@
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
? 0x400 : 0))
+#ifdef CONFIG_64BIT
+#define smmu_writeq writeq_relaxed
+#else
+#define smmu_writeq(reg64, addr) \
+ do { \
+ u64 __val = (reg64); \
+ void __iomem *__addr = (addr); \
+ writel_relaxed(__val >> 32, __addr + 4); \
+ writel_relaxed(__val, __addr); \
+ } while (0)
+#endif
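/*
 * Note on the helper above: on 32-bit builds smmu_writeq() issues two
 * writel_relaxed() accesses (upper word at addr + 4 first, then the lower
 * word) instead of a single writeq_relaxed(), so 64-bit SMMU registers can
 * still be programmed on arm32, at the cost of the update not being one
 * atomic access.  This is what lets the TTBR and ATS1PR writes later in this
 * patch drop their explicit hi/lo register pairs and CONFIG_64BIT guards.
 */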
+
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
@@ -185,10 +197,8 @@
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
-#define ARM_SMMU_CB_TTBR0_LO 0x20
-#define ARM_SMMU_CB_TTBR0_HI 0x24
-#define ARM_SMMU_CB_TTBR1_LO 0x28
-#define ARM_SMMU_CB_TTBR1_HI 0x2c
+#define ARM_SMMU_CB_TTBR0 0x20
+#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
@@ -226,7 +236,7 @@
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-#define TTBRn_HI_ASID_SHIFT 16
+#define TTBRn_ASID_SHIFT 48
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
@@ -695,12 +705,12 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
u32 reg;
+ u64 reg64;
bool stage1;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *cb_base, *gr0_base, *gr1_base;
+ void __iomem *cb_base, *gr1_base;
- gr0_base = ARM_SMMU_GR0(smmu);
gr1_base = ARM_SMMU_GR1(smmu);
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -738,22 +748,17 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
/* TTBRs */
if (stage1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
- reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
- reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
+ smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
+ smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
} else {
- reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+ reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
}
/* TTBCR */
@@ -1212,17 +1217,15 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
/* ATS1 registers can only be written atomically */
va = iova & ~0xfffUL;
-#ifdef CONFIG_64BIT
if (smmu->version == ARM_SMMU_V2)
- writeq_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
+ smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
else
-#endif
writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
!(tmp & ATSR_ACTIVE), 5, 50)) {
dev_err(dev,
- "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
+ "iova to phys timed out on %pad. Falling back to software table walk.\n",
&iova);
return ops->iova_to_phys(ops, iova);
}
@@ -1292,33 +1295,25 @@ static void __arm_smmu_release_pci_iommudata(void *data)
kfree(data);
}
-static int arm_smmu_add_pci_device(struct pci_dev *pdev)
+static int arm_smmu_init_pci_device(struct pci_dev *pdev,
+ struct iommu_group *group)
{
- int i, ret;
- u16 sid;
- struct iommu_group *group;
struct arm_smmu_master_cfg *cfg;
-
- group = iommu_group_get_for_dev(&pdev->dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ u16 sid;
+ int i;
cfg = iommu_group_get_iommudata(group);
if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- ret = -ENOMEM;
- goto out_put_group;
- }
+ if (!cfg)
+ return -ENOMEM;
iommu_group_set_iommudata(group, cfg,
__arm_smmu_release_pci_iommudata);
}
- if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
- ret = -ENOSPC;
- goto out_put_group;
- }
+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
+ return -ENOSPC;
/*
* Assume Stream ID == Requester ID for now.
@@ -1334,16 +1329,13 @@ static int arm_smmu_add_pci_device(struct pci_dev *pdev)
cfg->streamids[cfg->num_streamids++] = sid;
return 0;
-out_put_group:
- iommu_group_put(group);
- return ret;
}
-static int arm_smmu_add_platform_device(struct device *dev)
+static int arm_smmu_init_platform_device(struct device *dev,
+ struct iommu_group *group)
{
- struct iommu_group *group;
- struct arm_smmu_master *master;
struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+ struct arm_smmu_master *master;
if (!smmu)
return -ENODEV;
@@ -1352,21 +1344,20 @@ static int arm_smmu_add_platform_device(struct device *dev)
if (!master)
return -ENODEV;
- /* No automatic group creation for platform devices */
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return PTR_ERR(group);
-
iommu_group_set_iommudata(group, &master->cfg, NULL);
- return iommu_group_add_device(group, dev);
+
+ return 0;
}
static int arm_smmu_add_device(struct device *dev)
{
- if (dev_is_pci(dev))
- return arm_smmu_add_pci_device(to_pci_dev(dev));
+ struct iommu_group *group;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- return arm_smmu_add_platform_device(dev);
+ return 0;
}
static void arm_smmu_remove_device(struct device *dev)
@@ -1374,6 +1365,32 @@ static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static struct iommu_group *arm_smmu_device_group(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
+
+ if (IS_ERR(group))
+ return group;
+
+ if (dev_is_pci(dev))
+ ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
+ else
+ ret = arm_smmu_init_platform_device(dev, group);
+
+ if (ret) {
+ iommu_group_put(group);
+ group = ERR_PTR(ret);
+ }
+
+ return group;
+}
+
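/*
 * Flow note (illustrative): with .device_group wired up below, group handling
 * moves into the IOMMU core, roughly:
 *
 *	arm_smmu_add_device()
 *	  -> iommu_group_get_for_dev()
 *	       -> ops->device_group()  ==  arm_smmu_device_group()
 *	            -> pci_device_group() or generic_device_group()
 *	            -> arm_smmu_init_pci_device() or
 *	               arm_smmu_init_platform_device()
 *
 * which is why add_device no longer allocates or populates groups itself.
 */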
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
@@ -1430,6 +1447,7 @@ static struct iommu_ops arm_smmu_ops = {
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
+ .device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
new file mode 100644
index 000000000000..3a20db4f8604
--- /dev/null
+++ b/drivers/iommu/dma-iommu.c
@@ -0,0 +1,524 @@
+/*
+ * A fairly generic DMA-API to IOMMU-API glue layer.
+ *
+ * Copyright (C) 2014-2015 ARM Ltd.
+ *
+ * based in part on arch/arm/mm/dma-mapping.c:
+ * Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/huge_mm.h>
+#include <linux/iommu.h>
+#include <linux/iova.h>
+#include <linux/mm.h>
+
+int iommu_dma_init(void)
+{
+ return iova_cache_get();
+}
+
+/**
+ * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
+ * @domain: IOMMU domain to prepare for DMA-API usage
+ *
+ * IOMMU drivers should normally call this from their domain_alloc
+ * callback when domain->type == IOMMU_DOMAIN_DMA.
+ */
+int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ struct iova_domain *iovad;
+
+ if (domain->iova_cookie)
+ return -EEXIST;
+
+ iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
+ domain->iova_cookie = iovad;
+
+ return iovad ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_put_dma_cookie - Release a domain's DMA mapping resources
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ *
+ * IOMMU drivers should normally call this from their domain_free callback.
+ */
+void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+ struct iova_domain *iovad = domain->iova_cookie;
+
+ if (!iovad)
+ return;
+
+ put_iova_domain(iovad);
+ kfree(iovad);
+ domain->iova_cookie = NULL;
+}
+EXPORT_SYMBOL(iommu_put_dma_cookie);
+
+/**
+ * iommu_dma_init_domain - Initialise a DMA mapping domain
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @base: IOVA at which the mappable address space starts
+ * @size: Size of IOVA space
+ *
+ * @base and @size should be exact multiples of IOMMU page granularity to
+ * avoid rounding surprises. If necessary, we reserve the page at address 0
+ * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
+ * any change which could make prior IOVAs invalid will fail.
+ */
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
+{
+ struct iova_domain *iovad = domain->iova_cookie;
+ unsigned long order, base_pfn, end_pfn;
+
+ if (!iovad)
+ return -ENODEV;
+
+ /* Use the smallest supported page size for IOVA granularity */
+ order = __ffs(domain->ops->pgsize_bitmap);
+ base_pfn = max_t(unsigned long, 1, base >> order);
+ end_pfn = (base + size - 1) >> order;
+
+ /* Check the domain allows at least some access to the device... */
+ if (domain->geometry.force_aperture) {
+ if (base > domain->geometry.aperture_end ||
+ base + size <= domain->geometry.aperture_start) {
+ pr_warn("specified DMA range outside IOMMU capability\n");
+ return -EFAULT;
+ }
+ /* ...then finally give it a kicking to make sure it fits */
+ base_pfn = max_t(unsigned long, base_pfn,
+ domain->geometry.aperture_start >> order);
+ end_pfn = min_t(unsigned long, end_pfn,
+ domain->geometry.aperture_end >> order);
+ }
+
+ /* All we can safely do with an existing domain is enlarge it */
+ if (iovad->start_pfn) {
+ if (1UL << order != iovad->granule ||
+ base_pfn != iovad->start_pfn ||
+ end_pfn < iovad->dma_32bit_pfn) {
+ pr_warn("Incompatible range for DMA domain\n");
+ return -EFAULT;
+ }
+ iovad->dma_32bit_pfn = end_pfn;
+ } else {
+ init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iommu_dma_init_domain);
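/*
 * Minimal usage sketch (hypothetical driver code, not part of this patch) of
 * the flow the kerneldoc above describes: take the cookie in the driver's
 * domain_alloc() callback for IOMMU_DOMAIN_DMA domains, then size the IOVA
 * space once the usable aperture is known.  A real driver would embed the
 * iommu_domain in its own domain structure.
 */
static struct iommu_domain *foo_domain_alloc(unsigned type)
{
	struct iommu_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);

	if (!dom)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(dom)) {
		kfree(dom);
		return NULL;
	}
	return dom;
}

/* ...and later, e.g. once the first device is attached: */
/*	iommu_dma_init_domain(dom, base, size);	*/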
+
+/**
+ * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
+ * @dir: Direction of DMA transfer
+ * @coherent: Is the DMA master cache-coherent?
+ *
+ * Return: corresponding IOMMU API page protection flags
+ */
+int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
+{
+ int prot = coherent ? IOMMU_CACHE : 0;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ return prot | IOMMU_READ | IOMMU_WRITE;
+ case DMA_TO_DEVICE:
+ return prot | IOMMU_READ;
+ case DMA_FROM_DEVICE:
+ return prot | IOMMU_WRITE;
+ default:
+ return 0;
+ }
+}
+
+static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+ dma_addr_t dma_limit)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long length = iova_align(iovad, size) >> shift;
+
+ /*
+ * Enforce size-alignment to be safe - there could perhaps be an
+ * attribute to control this per-device, or at least per-domain...
+ */
+ return alloc_iova(iovad, length, dma_limit >> shift, true);
+}
+
+/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
+static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
+{
+ struct iova_domain *iovad = domain->iova_cookie;
+ unsigned long shift = iova_shift(iovad);
+ unsigned long pfn = dma_addr >> shift;
+ struct iova *iova = find_iova(iovad, pfn);
+ size_t size;
+
+ if (WARN_ON(!iova))
+ return;
+
+ size = iova_size(iova) << shift;
+ size -= iommu_unmap(domain, pfn << shift, size);
+ /* ...and if we can't, then something is horribly, horribly wrong */
+ WARN_ON(size > 0);
+ __free_iova(iovad, iova);
+}
+
+static void __iommu_dma_free_pages(struct page **pages, int count)
+{
+ while (count--)
+ __free_page(pages[count]);
+ kvfree(pages);
+}
+
+static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
+{
+ struct page **pages;
+ unsigned int i = 0, array_size = count * sizeof(*pages);
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, GFP_KERNEL);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ /* IOMMU can map any pages, so highmem can also be used here */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+ while (count) {
+ struct page *page = NULL;
+ int j, order = __fls(count);
+
+ /*
+ * Higher-order allocations are a convenience rather
+ * than a necessity, hence using __GFP_NORETRY until
+ * falling back to single-page allocations.
+ */
+ for (order = min(order, MAX_ORDER); order > 0; order--) {
+ page = alloc_pages(gfp | __GFP_NORETRY, order);
+ if (!page)
+ continue;
+ if (PageCompound(page)) {
+ if (!split_huge_page(page))
+ break;
+ __free_pages(page, order);
+ } else {
+ split_page(page, order);
+ break;
+ }
+ }
+ if (!page)
+ page = alloc_page(gfp);
+ if (!page) {
+ __iommu_dma_free_pages(pages, i);
+ return NULL;
+ }
+ j = 1 << order;
+ count -= j;
+ while (j--)
+ pages[i++] = page++;
+ }
+ return pages;
+}
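/*
 * Worked example (illustrative) of the allocation loop above: for count = 5,
 * __fls(5) = 2, so an order-2 (four page) block is tried first; on success it
 * is split into independent pages and count drops to 1.  The next pass has
 * order = __fls(1) = 0, the inner loop is skipped, and the last page comes
 * from the plain alloc_page() fallback.  Only if the order-2 attempt fails is
 * order 1 tried before giving up on higher orders.
 */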
+
+/**
+ * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
+ * @dev: Device which owns this buffer
+ * @pages: Array of buffer pages as returned by iommu_dma_alloc()
+ * @size: Size of buffer in bytes
+ * @handle: DMA address of buffer
+ *
+ * Frees both the pages associated with the buffer, and the array
+ * describing them
+ */
+void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
+ dma_addr_t *handle)
+{
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
+ __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
+ *handle = DMA_ERROR_CODE;
+}
+
+/**
+ * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * @dev: Device to allocate memory for. Must be a real device
+ * attached to an iommu_dma_domain
+ * @size: Size of buffer in bytes
+ * @gfp: Allocation flags
+ * @prot: IOMMU mapping flags
+ * @handle: Out argument for allocated DMA handle
+ * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
+ * given VA/PA are visible to the given non-coherent device.
+ *
+ * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+ * but an IOMMU which supports smaller pages might not map the whole thing.
+ *
+ * Return: Array of struct page pointers describing the buffer,
+ * or NULL on failure.
+ */
+struct page **iommu_dma_alloc(struct device *dev, size_t size,
+ gfp_t gfp, int prot, dma_addr_t *handle,
+ void (*flush_page)(struct device *, const void *, phys_addr_t))
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iova_domain *iovad = domain->iova_cookie;
+ struct iova *iova;
+ struct page **pages;
+ struct sg_table sgt;
+ dma_addr_t dma_addr;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ *handle = DMA_ERROR_CODE;
+
+ pages = __iommu_dma_alloc_pages(count, gfp);
+ if (!pages)
+ return NULL;
+
+ iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+ if (!iova)
+ goto out_free_pages;
+
+ size = iova_align(iovad, size);
+ if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+ goto out_free_iova;
+
+ if (!(prot & IOMMU_CACHE)) {
+ struct sg_mapping_iter miter;
+ /*
+ * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
+ * sufficient here, so skip it by using the "wrong" direction.
+ */
+ sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
+ while (sg_miter_next(&miter))
+ flush_page(dev, miter.addr, page_to_phys(miter.page));
+ sg_miter_stop(&miter);
+ }
+
+ dma_addr = iova_dma_addr(iovad, iova);
+ if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
+ < size)
+ goto out_free_sg;
+
+ *handle = dma_addr;
+ sg_free_table(&sgt);
+ return pages;
+
+out_free_sg:
+ sg_free_table(&sgt);
+out_free_iova:
+ __free_iova(iovad, iova);
+out_free_pages:
+ __iommu_dma_free_pages(pages, count);
+ return NULL;
+}
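/*
 * Minimal caller sketch (hypothetical arch glue, not part of this patch)
 * showing how a DMA-API backend might combine the helpers above;
 * arch_flush_page() stands in for whatever cache maintenance a non-coherent
 * master needs.
 */
static void arch_flush_page(struct device *dev, const void *virt,
			    phys_addr_t phys)
{
	/* e.g. clean the CPU data cache for this page */
}

static void *arch_iommu_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t gfp)
{
	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
	struct page **pages;

	pages = iommu_dma_alloc(dev, size, gfp, prot, handle, arch_flush_page);
	if (!pages)
		return NULL;

	/* Give the CPU a virtually contiguous view of the buffer */
	return vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
}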
+
+/**
+ * iommu_dma_mmap - Map a buffer into provided user VMA
+ * @pages: Array representing buffer from iommu_dma_alloc()
+ * @size: Size of buffer in bytes
+ * @vma: VMA describing requested userspace mapping
+ *
+ * Maps the pages of the buffer in @pages into @vma. The caller is responsible
+ * for verifying the correct size and protection of @vma beforehand.
+ */
+
+int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
+{
+ unsigned long uaddr = vma->vm_start;
+ unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ int ret = -ENXIO;
+
+ for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
+ ret = vm_insert_page(vma, uaddr, pages[i]);
+ if (ret)
+ break;
+ uaddr += PAGE_SIZE;
+ }
+ return ret;
+}
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, int prot)
+{
+ dma_addr_t dma_addr;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iova_domain *iovad = domain->iova_cookie;
+ phys_addr_t phys = page_to_phys(page) + offset;
+ size_t iova_off = iova_offset(iovad, phys);
+ size_t len = iova_align(iovad, size + iova_off);
+ struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+
+ if (!iova)
+ return DMA_ERROR_CODE;
+
+ dma_addr = iova_dma_addr(iovad, iova);
+ if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
+ __free_iova(iovad, iova);
+ return DMA_ERROR_CODE;
+ }
+ return dma_addr + iova_off;
+}
+
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+}
+
+/*
+ * Prepare a successfully-mapped scatterlist to give back to the caller.
+ * Handling IOVA concatenation can come later, if needed
+ */
+static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
+ dma_addr_t dma_addr)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ /* Un-swizzling the fields here, hence the naming mismatch */
+ unsigned int s_offset = sg_dma_address(s);
+ unsigned int s_length = sg_dma_len(s);
+ unsigned int s_dma_len = s->length;
+
+ s->offset = s_offset;
+ s->length = s_length;
+ sg_dma_address(s) = dma_addr + s_offset;
+ dma_addr += s_dma_len;
+ }
+ return i;
+}
+
+/*
+ * If mapping failed, then just restore the original list,
+ * but making sure the DMA fields are invalidated.
+ */
+static void __invalidate_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_address(s) != DMA_ERROR_CODE)
+ s->offset = sg_dma_address(s);
+ if (sg_dma_len(s))
+ s->length = sg_dma_len(s);
+ sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_len(s) = 0;
+ }
+}
+
+/*
+ * The DMA API client is passing in a scatterlist which could describe
+ * any old buffer layout, but the IOMMU API requires everything to be
+ * aligned to IOMMU pages. Hence the need for this complicated bit of
+ * impedance-matching, to be able to hand off a suitably-aligned list,
+ * but still preserve the original offsets and sizes for the caller.
+ */
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int prot)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iova_domain *iovad = domain->iova_cookie;
+ struct iova *iova;
+ struct scatterlist *s, *prev = NULL;
+ dma_addr_t dma_addr;
+ size_t iova_len = 0;
+ int i;
+
+ /*
+ * Work out how much IOVA space we need, and align the segments to
+ * IOVA granules for the IOMMU driver to handle. With some clever
+ * trickery we can modify the list in-place, but reversibly, by
+ * hiding the original data in the as-yet-unused DMA fields.
+ */
+ for_each_sg(sg, s, nents, i) {
+ size_t s_offset = iova_offset(iovad, s->offset);
+ size_t s_length = s->length;
+
+ sg_dma_address(s) = s->offset;
+ sg_dma_len(s) = s_length;
+ s->offset -= s_offset;
+ s_length = iova_align(iovad, s_length + s_offset);
+ s->length = s_length;
+
+ /*
+ * The simple way to avoid the rare case of a segment
+ * crossing the boundary mask is to pad the previous one
+ * to end at a naturally-aligned IOVA for this one's size,
+ * at the cost of potentially over-allocating a little.
+ */
+ if (prev) {
+ size_t pad_len = roundup_pow_of_two(s_length);
+
+ pad_len = (pad_len - iova_len) & (pad_len - 1);
+ prev->length += pad_len;
+ iova_len += pad_len;
+ }
+
+ iova_len += s_length;
+ prev = s;
+ }
+
+ iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+ if (!iova)
+ goto out_restore_sg;
+
+ /*
+ * We'll leave any physical concatenation to the IOMMU driver's
+ * implementation - it knows better than we do.
+ */
+ dma_addr = iova_dma_addr(iovad, iova);
+ if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
+ goto out_free_iova;
+
+ return __finalise_sg(dev, sg, nents, dma_addr);
+
+out_free_iova:
+ __free_iova(iovad, iova);
+out_restore_sg:
+ __invalidate_sg(sg, nents);
+ return 0;
+}
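/*
 * Worked example (illustrative numbers) of the padding above: with 4K IOVA
 * granules, suppose iova_len has reached 0x3000 when a segment of aligned
 * length 0x2000 arrives.  roundup_pow_of_two(0x2000) = 0x2000, so
 *
 *	pad_len = (0x2000 - 0x3000) & 0x1fff = 0x1000
 *
 * The previous segment grows by one granule and the new segment starts at
 * offset 0x4000 within the (size-aligned) IOVA allocation, i.e. naturally
 * aligned to its own 0x2000 length, so it can never straddle a 0x2000
 * boundary mask.
 */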
+
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ /*
+ * The scatterlist segments are mapped into a single
+ * contiguous IOVA allocation, so this is incredibly easy.
+ */
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
+}
+
+int iommu_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * 'Special' IOMMUs which don't have the same addressing capability
+ * as the CPU will have to wait until we have some way to query that
+ * before they'll be able to use this framework.
+ */
+ return 1;
+}
+
+int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == DMA_ERROR_CODE;
+}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8757f8dfc4e5..80e3c176008e 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1086,6 +1086,11 @@ static void free_iommu(struct intel_iommu *iommu)
iommu_device_destroy(iommu->iommu_dev);
if (iommu->irq) {
+ if (iommu->pr_irq) {
+ free_irq(iommu->pr_irq, iommu);
+ dmar_free_hwirq(iommu->pr_irq);
+ iommu->pr_irq = 0;
+ }
free_irq(iommu->irq, iommu);
dmar_free_hwirq(iommu->irq);
iommu->irq = 0;
@@ -1493,53 +1498,68 @@ static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
}
}
+
+static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
+{
+ if (iommu->irq == irq)
+ return DMAR_FECTL_REG;
+ else if (iommu->pr_irq == irq)
+ return DMAR_PECTL_REG;
+ else
+ BUG();
+}
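/*
 * The fixed offsets used below rely on the VT-d register layout, in which the
 * data, address and upper-address registers directly follow their control
 * register for both interrupt sources:
 *
 *	msg->data       -> reg + 4   (DMAR_FEDATA_REG  / DMAR_PEDATA_REG)
 *	msg->address_lo -> reg + 8   (DMAR_FEADDR_REG  / DMAR_PEADDR_REG)
 *	msg->address_hi -> reg + 12  (DMAR_FEUADDR_REG / DMAR_PEUADDR_REG)
 *
 * which is what lets dmar_msi_write()/dmar_msi_read() be shared between the
 * fault-event and page-request interrupts.
 */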
+
void dmar_msi_unmask(struct irq_data *data)
{
struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
+ int reg = dmar_msi_reg(iommu, data->irq);
unsigned long flag;
/* unmask it */
raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(0, iommu->reg + DMAR_FECTL_REG);
+ writel(0, iommu->reg + reg);
/* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
+ readl(iommu->reg + reg);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
{
- unsigned long flag;
struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
+ int reg = dmar_msi_reg(iommu, data->irq);
+ unsigned long flag;
/* mask it */
raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+ writel(DMA_FECTL_IM, iommu->reg + reg);
/* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
+ readl(iommu->reg + reg);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
struct intel_iommu *iommu = irq_get_handler_data(irq);
+ int reg = dmar_msi_reg(iommu, irq);
unsigned long flag;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
- writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
- writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+ writel(msg->data, iommu->reg + reg + 4);
+ writel(msg->address_lo, iommu->reg + reg + 8);
+ writel(msg->address_hi, iommu->reg + reg + 12);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
struct intel_iommu *iommu = irq_get_handler_data(irq);
+ int reg = dmar_msi_reg(iommu, irq);
unsigned long flag;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
- msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
- msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
- msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+ msg->data = readl(iommu->reg + reg + 4);
+ msg->address_lo = readl(iommu->reg + reg + 8);
+ msg->address_hi = readl(iommu->reg + reg + 12);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 1d452930c890..da0e1e30ef37 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -923,7 +923,7 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
/* We can partition PCIe devices so assign device group to the device */
if (pci_endpt_partioning) {
- group = iommu_group_get_for_dev(&pdev->dev);
+ group = pci_device_group(&pdev->dev);
/*
* PCIe controller is not a partitionable entity
@@ -956,44 +956,34 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
return group;
}
-static int fsl_pamu_add_device(struct device *dev)
+static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
struct iommu_group *group = ERR_PTR(-ENODEV);
- struct pci_dev *pdev;
- const u32 *prop;
- int ret = 0, len;
+ int len;
/*
* For platform devices we allocate a separate group for
* each of the devices.
*/
- if (dev_is_pci(dev)) {
- pdev = to_pci_dev(dev);
- /* Don't create device groups for virtual PCI bridges */
- if (pdev->subordinate)
- return 0;
+ if (dev_is_pci(dev))
+ group = get_pci_device_group(to_pci_dev(dev));
+ else if (of_get_property(dev->of_node, "fsl,liodn", &len))
+ group = get_device_iommu_group(dev);
- group = get_pci_device_group(pdev);
+ return group;
+}
- } else {
- prop = of_get_property(dev->of_node, "fsl,liodn", &len);
- if (prop)
- group = get_device_iommu_group(dev);
- }
+static int fsl_pamu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
- /*
- * Check if device has already been added to an iommu group.
- * Group could have already been created for a PCI device in
- * the iommu_group_get_for_dev path.
- */
- if (!dev->iommu_group)
- ret = iommu_group_add_device(group, dev);
-
iommu_group_put(group);
- return ret;
+
+ return 0;
}
static void fsl_pamu_remove_device(struct device *dev)
@@ -1072,6 +1062,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.domain_get_attr = fsl_pamu_get_domain_attr,
.add_device = fsl_pamu_add_device,
.remove_device = fsl_pamu_remove_device,
+ .device_group = fsl_pamu_device_group,
};
int __init pamu_domain_init(void)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d65cf42399e8..7cf80c1a8a16 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -34,6 +34,7 @@
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
+#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
@@ -418,10 +419,13 @@ struct device_domain_info {
struct list_head global; /* link to global list */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
- struct {
- u8 enabled:1;
- u8 qdep;
- } ats; /* ATS state */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+ u8 pri_enabled:1;
+ u8 ats_supported:1;
+ u8 ats_enabled:1;
+ u8 ats_qdep;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
@@ -497,13 +501,37 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
+static int iommu_identity_mapping;
+
+#define IDENTMAP_ALL 1
+#define IDENTMAP_GFX 2
+#define IDENTMAP_AZALIA 4
-/* We only actually use ECS when PASID support (on the new bit 40)
- * is also advertised. Some early implementations — the ones with
- * PASID support on bit 28 — have issues even when we *only* use
- * extended root/context tables. */
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to change the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capability bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
- ecap_pasid(iommu->ecap))
+ (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* of the old
+ * or new capability bits are set. */
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
+ (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
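/*
 * Putting the two macros together (illustrative truth table; "bit 28" is
 * ecap_broken_pasid(), "bit 40" is ecap_pasid(), and all rows assume
 * intel_iommu_ecs and ecap_ecs() are set):
 *
 *	bit 28	bit 40	pasid28 option	ecs_enabled	pasid_enabled
 *	  0	  0	     any	    yes		     no
 *	  0	  1	     any	    yes		     yes
 *	  1	  any	     off	    no		     no
 *	  1	  any	     on		    yes		     yes
 */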
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -566,6 +594,11 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
+ } else if (!strncmp(str, "pasid28", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: enable pre-production PASID support\n");
+ intel_iommu_pasid28 = 1;
+ iommu_identity_mapping |= IDENTMAP_GFX;
}
str += strcspn(str, ",");
@@ -1407,37 +1440,22 @@ static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
- bool found = false;
struct device_domain_info *info;
- struct pci_dev *pdev;
assert_spin_locked(&device_domain_lock);
- if (!ecap_dev_iotlb_support(iommu->ecap))
- return NULL;
-
if (!iommu->qi)
return NULL;
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- found = true;
+ if (info->ats_supported && info->dev)
+ return info;
break;
}
- if (!found || !info->dev || !dev_is_pci(info->dev))
- return NULL;
-
- pdev = to_pci_dev(info->dev);
-
- if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
- return NULL;
-
- if (!dmar_find_matched_atsr_unit(pdev))
- return NULL;
-
- return info;
+ return NULL;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1448,20 +1466,48 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
- if (pci_enable_ats(pdev, VTD_PAGE_SHIFT))
- return;
- info->ats.enabled = 1;
- info->ats.qdep = pci_ats_queue_depth(pdev);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ /* The PCIe spec, in its wisdom, declares that the behaviour of
+ the device if you enable PASID support after ATS support is
+ undefined. So always enable PASID support on devices which
+ have it, even if we can't yet know if we're ever going to
+ use it. */
+ if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
+ info->pasid_enabled = 1;
+
+ if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+ info->pri_enabled = 1;
+#endif
+ if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
+ info->ats_enabled = 1;
+ info->ats_qdep = pci_ats_queue_depth(pdev);
+ }
}
static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
- if (!info->ats.enabled)
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(info->dev))
return;
- pci_disable_ats(to_pci_dev(info->dev));
- info->ats.enabled = 0;
+ pdev = to_pci_dev(info->dev);
+
+ if (info->ats_enabled) {
+ pci_disable_ats(pdev);
+ info->ats_enabled = 0;
+ }
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ if (info->pri_enabled) {
+ pci_disable_pri(pdev);
+ info->pri_enabled = 0;
+ }
+ if (info->pasid_enabled) {
+ pci_disable_pasid(pdev);
+ info->pasid_enabled = 0;
+ }
+#endif
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1473,11 +1519,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats.enabled)
+ if (!info->ats_enabled)
continue;
sid = info->bus << 8 | info->devfn;
- qdep = info->ats.qdep;
+ qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1667,6 +1713,14 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
/* free context mapping */
free_context_table(iommu);
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu)) {
+ if (ecap_prs(iommu->ecap))
+ intel_svm_finish_prq(iommu);
+ intel_svm_free_pasid_tables(iommu);
+ }
+#endif
}
static struct dmar_domain *alloc_domain(int flags)
@@ -1934,8 +1988,10 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
- translation = info ? CONTEXT_TT_DEV_IOTLB :
- CONTEXT_TT_MULTI_LEVEL;
+ if (info && info->ats_supported)
+ translation = CONTEXT_TT_DEV_IOTLB;
+ else
+ translation = CONTEXT_TT_MULTI_LEVEL;
context_set_address_root(context, virt_to_phys(pgd));
context_set_address_width(context, iommu->agaw);
@@ -2273,12 +2329,34 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->bus = bus;
info->devfn = devfn;
- info->ats.enabled = 0;
- info->ats.qdep = 0;
+ info->ats_supported = info->pasid_supported = info->pri_supported = 0;
+ info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
+ info->ats_qdep = 0;
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
+ if (dev && dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(info->dev);
+
+ if (ecap_dev_iotlb_support(iommu->ecap) &&
+ pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
+ dmar_find_matched_atsr_unit(pdev))
+ info->ats_supported = 1;
+
+ if (ecs_enabled(iommu)) {
+ if (pasid_enabled(iommu)) {
+ int features = pci_pasid_features(pdev);
+ if (features >= 0)
+ info->pasid_supported = features | 1;
+ }
+
+ if (info->ats_supported && ecap_prs(iommu->ecap) &&
+ pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
+ info->pri_supported = 1;
+ }
+ }
+
spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
found = find_domain(dev);
@@ -2404,11 +2482,6 @@ found_domain:
return domain;
}
-static int iommu_identity_mapping;
-#define IDENTMAP_ALL 1
-#define IDENTMAP_GFX 2
-#define IDENTMAP_AZALIA 4
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
unsigned long long end)
@@ -2434,17 +2507,11 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
DMA_PTE_READ|DMA_PTE_WRITE);
}
-static int iommu_prepare_identity_map(struct device *dev,
- unsigned long long start,
- unsigned long long end)
+static int domain_prepare_identity_map(struct device *dev,
+ struct dmar_domain *domain,
+ unsigned long long start,
+ unsigned long long end)
{
- struct dmar_domain *domain;
- int ret;
-
- domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
-
/* For _hardware_ passthrough, don't bother. But for software
passthrough, we do it anyway -- it may indicate a memory
range which is reserved in E820, so which didn't get set
@@ -2464,8 +2531,7 @@ static int iommu_prepare_identity_map(struct device *dev,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
- ret = -EIO;
- goto error;
+ return -EIO;
}
if (end >> agaw_to_width(domain->agaw)) {
@@ -2475,18 +2541,27 @@ static int iommu_prepare_identity_map(struct device *dev,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
- ret = -EIO;
- goto error;
+ return -EIO;
}
- ret = iommu_domain_identity_map(domain, start, end);
- if (ret)
- goto error;
+ return iommu_domain_identity_map(domain, start, end);
+}
- return 0;
+static int iommu_prepare_identity_map(struct device *dev,
+ unsigned long long start,
+ unsigned long long end)
+{
+ struct dmar_domain *domain;
+ int ret;
+
+ domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (!domain)
+ return -ENOMEM;
+
+ ret = domain_prepare_identity_map(dev, domain, start, end);
+ if (ret)
+ domain_exit(domain);
- error:
- domain_exit(domain);
return ret;
}
@@ -2812,18 +2887,18 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
}
static int copy_context_table(struct intel_iommu *iommu,
- struct root_entry __iomem *old_re,
+ struct root_entry *old_re,
struct context_entry **tbl,
int bus, bool ext)
{
int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
- struct context_entry __iomem *old_ce = NULL;
struct context_entry *new_ce = NULL, ce;
+ struct context_entry *old_ce = NULL;
struct root_entry re;
phys_addr_t old_ce_phys;
tbl_idx = ext ? bus * 2 : bus;
- memcpy_fromio(&re, old_re, sizeof(re));
+ memcpy(&re, old_re, sizeof(re));
for (devfn = 0; devfn < 256; devfn++) {
/* First calculate the correct index */
@@ -2858,7 +2933,8 @@ static int copy_context_table(struct intel_iommu *iommu,
}
ret = -ENOMEM;
- old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
+ old_ce = memremap(old_ce_phys, PAGE_SIZE,
+ MEMREMAP_WB);
if (!old_ce)
goto out;
@@ -2870,7 +2946,7 @@ static int copy_context_table(struct intel_iommu *iommu,
}
/* Now copy the context entry */
- memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
+ memcpy(&ce, old_ce + idx, sizeof(ce));
if (!__context_present(&ce))
continue;
@@ -2906,7 +2982,7 @@ static int copy_context_table(struct intel_iommu *iommu,
__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
out_unmap:
- iounmap(old_ce);
+ memunmap(old_ce);
out:
return ret;
@@ -2914,8 +2990,8 @@ out:
static int copy_translation_tables(struct intel_iommu *iommu)
{
- struct root_entry __iomem *old_rt;
struct context_entry **ctxt_tbls;
+ struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
unsigned long flags;
@@ -2940,7 +3016,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
if (!old_rt_phys)
return -EINVAL;
- old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
+ old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
if (!old_rt)
return -ENOMEM;
@@ -2989,7 +3065,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
ret = 0;
out_unmap:
- iounmap(old_rt);
+ memunmap(old_rt);
return ret;
}
@@ -3100,6 +3176,10 @@ static int __init init_dmars(void)
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu))
+ intel_svm_alloc_pasid_tables(iommu);
+#endif
}
if (iommu_pass_through)
@@ -3187,6 +3267,13 @@ domains_done:
iommu_flush_write_buffer(iommu);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+ ret = intel_svm_enable_prq(iommu);
+ if (ret)
+ goto free_iommu;
+ }
+#endif
ret = dmar_set_interrupt(iommu);
if (ret)
goto free_iommu;
@@ -3246,7 +3333,10 @@ static struct iova *intel_alloc_iova(struct device *dev,
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
+ struct dmar_rmrr_unit *rmrr;
struct dmar_domain *domain;
+ struct device *i_dev;
+ int i, ret;
domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain) {
@@ -3255,6 +3345,23 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
return NULL;
}
+ /* We have a new domain - setup possible RMRRs for the device */
+ rcu_read_lock();
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, i_dev) {
+ if (i_dev != dev)
+ continue;
+
+ ret = domain_prepare_identity_map(dev, domain,
+ rmrr->base_address,
+ rmrr->end_address);
+ if (ret)
+ dev_err(dev, "Mapping reserved region failed\n");
+ }
+ }
+ rcu_read_unlock();
+
return domain;
}
@@ -4115,6 +4222,11 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (ret)
goto out;
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu))
+ intel_svm_alloc_pasid_tables(iommu);
+#endif
+
if (dmaru->ignored) {
/*
* we always have to disable PMRs or DMA may fail on this device
@@ -4126,6 +4238,14 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
intel_iommu_init_qi(iommu);
iommu_flush_write_buffer(iommu);
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+ ret = intel_svm_enable_prq(iommu);
+ if (ret)
+ goto disable_iommu;
+ }
+#endif
ret = dmar_set_interrupt(iommu);
if (ret)
goto disable_iommu;
@@ -4194,14 +4314,17 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
dev = pci_physfn(dev);
for (bus = dev->bus; bus; bus = bus->parent) {
bridge = bus->self;
- if (!bridge || !pci_is_pcie(bridge) ||
+ /* If it's an integrated device, allow ATS */
+ if (!bridge)
+ return 1;
+ /* Connected via non-PCIe: no ATS */
+ if (!pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
return 0;
+ /* If we found the root port, look it up in the ATSR */
if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
}
- if (!bridge)
- return 0;
rcu_read_lock();
list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
@@ -4865,6 +4988,114 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_device_unlink(iommu->iommu_dev, dev);
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+{
+ struct device_domain_info *info;
+ struct context_entry *context;
+ struct dmar_domain *domain;
+ unsigned long flags;
+ u64 ctx_lo;
+ int ret;
+
+ domain = get_valid_domain_for_dev(sdev->dev);
+ if (!domain)
+ return -EINVAL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&iommu->lock);
+
+ ret = -EINVAL;
+ info = sdev->dev->archdata.iommu;
+ if (!info || !info->pasid_supported)
+ goto out;
+
+ context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
+ if (WARN_ON(!context))
+ goto out;
+
+ ctx_lo = context[0].lo;
+
+ sdev->did = domain->iommu_did[iommu->seq_id];
+ sdev->sid = PCI_DEVID(info->bus, info->devfn);
+
+ if (!(ctx_lo & CONTEXT_PASIDE)) {
+ context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
+ context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
+ wmb();
+ /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
+ * extended to permit requests-with-PASID if the PASIDE bit
+ * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
+ * however, the PASIDE bit is ignored and requests-with-PASID
+ * are unconditionally blocked. Which makes less sense.
+ * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
+ * "guest mode" translation types depending on whether ATS
+ * is available or not. Annoyingly, we can't use the new
+ * modes *unless* PASIDE is set. */
+ if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
+ ctx_lo &= ~CONTEXT_TT_MASK;
+ if (info->ats_supported)
+ ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
+ else
+ ctx_lo |= CONTEXT_TT_PT_PASID << 2;
+ }
+ ctx_lo |= CONTEXT_PASIDE;
+ if (iommu->pasid_state_table)
+ ctx_lo |= CONTEXT_DINVE;
+ if (info->pri_supported)
+ ctx_lo |= CONTEXT_PRS;
+ context[0].lo = ctx_lo;
+ wmb();
+ iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ }
+
+ /* Enable PASID support in the device, if it wasn't already */
+ if (!info->pasid_enabled)
+ iommu_enable_dev_iotlb(info);
+
+ if (info->ats_enabled) {
+ sdev->dev_iotlb = 1;
+ sdev->qdep = info->ats_qdep;
+ if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
+ sdev->qdep = 0;
+ }
+ ret = 0;
+
+ out:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return ret;
+}
+
+struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
+{
+ struct intel_iommu *iommu;
+ u8 bus, devfn;
+
+ if (iommu_dummy(dev)) {
+ dev_warn(dev,
+ "No IOMMU translation for device; cannot enable SVM\n");
+ return NULL;
+ }
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu) {
+ dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
+ return NULL;
+ }
+
+ if (!iommu->pasid_table) {
+ dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
+ return NULL;
+ }
+
+ return iommu;
+}
+#endif /* CONFIG_INTEL_IOMMU_SVM */
+
static const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -4877,6 +5108,7 @@ static const struct iommu_ops intel_iommu_ops = {
.iova_to_phys = intel_iommu_iova_to_phys,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
+ .device_group = pci_device_group,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
new file mode 100644
index 000000000000..c69e3f9ec958
--- /dev/null
+++ b/drivers/iommu/intel-svm.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright © 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/intel-iommu.h>
+#include <linux/mmu_notifier.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/intel-svm.h>
+#include <linux/rculist.h>
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
+#include <linux/dmar.h>
+#include <linux/interrupt.h>
+
+static irqreturn_t prq_event_thread(int irq, void *d);
+
+struct pasid_entry {
+ u64 val;
+};
+
+struct pasid_state_entry {
+ u64 val;
+};
+
+int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
+{
+ struct page *pages;
+ int order;
+
+ order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
+ if (order < 0)
+ order = 0;
+
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!pages) {
+ pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
+ iommu->name);
+ return -ENOMEM;
+ }
+ iommu->pasid_table = page_address(pages);
+ pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
+
+ if (ecap_dis(iommu->ecap)) {
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (pages)
+ iommu->pasid_state_table = page_address(pages);
+ else
+ pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
+ iommu->name);
+ }
+
+ idr_init(&iommu->pasid_idr);
+
+ return 0;
+}
+
+int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
+{
+ int order;
+
+ order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
+ if (order < 0)
+ order = 0;
+
+ if (iommu->pasid_table) {
+ free_pages((unsigned long)iommu->pasid_table, order);
+ iommu->pasid_table = NULL;
+ }
+ if (iommu->pasid_state_table) {
+ free_pages((unsigned long)iommu->pasid_state_table, order);
+ iommu->pasid_state_table = NULL;
+ }
+ idr_destroy(&iommu->pasid_idr);
+ return 0;
+}
+
+#define PRQ_ORDER 0
+
+int intel_svm_enable_prq(struct intel_iommu *iommu)
+{
+ struct page *pages;
+ int irq, ret;
+
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ if (!pages) {
+ pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
+ iommu->name);
+ return -ENOMEM;
+ }
+ iommu->prq = page_address(pages);
+
+ irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
+ if (irq <= 0) {
+ pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
+ iommu->name);
+ ret = -EINVAL;
+ err:
+ free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+ iommu->prq = NULL;
+ return ret;
+ }
+ iommu->pr_irq = irq;
+
+ snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
+
+ ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
+ iommu->prq_name, iommu);
+ if (ret) {
+ pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
+ iommu->name);
+ dmar_free_hwirq(irq);
+ goto err;
+ }
+ dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
+ dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
+ dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
+
+ return 0;
+}
+
+int intel_svm_finish_prq(struct intel_iommu *iommu)
+{
+ dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
+ dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
+ dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
+
+ free_irq(iommu->pr_irq, iommu);
+ dmar_free_hwirq(iommu->pr_irq);
+ iommu->pr_irq = 0;
+
+ free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+ iommu->prq = NULL;
+
+ return 0;
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
+ unsigned long address, unsigned long pages, int ih, int gl)
+{
+ struct qi_desc desc;
+
+ if (pages == -1) {
+ /* For global kernel pages we have to flush them in *all* PASIDs
+ * because that's the only option the hardware gives us. Despite
+ * the fact that they are actually only accessible through one. */
+ if (gl)
+ desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
+ else
+ desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
+ desc.high = 0;
+ } else {
+ int mask = ilog2(__roundup_pow_of_two(pages));
+
+ desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
+ desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
+ QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
+ }
+ qi_submit_sync(&desc, svm->iommu);
+
+ if (sdev->dev_iotlb) {
+ desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
+ QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
+ if (pages == -1) {
+ desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
+ } else if (pages > 1) {
+ /* The least significant zero bit indicates the size. So,
+ * for example, an "address" value of 0x12345f000 will
+ * flush from 0x123440000 to 0x12347ffff (256KiB). */
+ unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
+		unsigned long mask = __rounddown_pow_of_two(address ^ last);
+
+ desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
+ } else {
+ desc.high = QI_DEV_EIOTLB_ADDR(address);
+ }
+ qi_submit_sync(&desc, svm->iommu);
+ }
+}
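(As a quick sanity check of the invalidation-range encoding described in the comment above, here is a purely illustrative userspace sketch, not part of the patch; the kernel's __rounddown_pow_of_two() is modelled with a GCC builtin and the page count is chosen to reproduce the numbers from the comment:)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t address = 0x12345f000ULL;      /* first page to invalidate */
            unsigned long pages = 32;               /* number of pages flushed */
            uint64_t last = address + ((uint64_t)(pages - 1) << 12);
            /* equivalent of __rounddown_pow_of_two(address ^ last) */
            uint64_t mask = 1ULL << (63 - __builtin_clzll(address ^ last));
            uint64_t encoded = (address & ~mask) | (mask - 1);

            /* Prints 0x12345ffff; QI_DEV_EIOTLB_ADDR() keeps only bits 12 and
             * up, giving the 0x12345f000 from the comment above, i.e. a 256KiB
             * invalidation covering 0x123440000..0x12347ffff. */
            printf("%#llx\n", (unsigned long long)encoded);
            return 0;
    }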
+
+static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
+ unsigned long pages, int ih, int gl)
+{
+ struct intel_svm_dev *sdev;
+
+ /* Try deferred invalidate if available */
+ if (svm->iommu->pasid_state_table &&
+ !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list)
+ intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+ rcu_read_unlock();
+}
+
+static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long address, pte_t pte)
+{
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
+ intel_flush_svm_range(svm, address, 1, 1, 0);
+}
+
+static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long address)
+{
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
+ intel_flush_svm_range(svm, address, 1, 1, 0);
+}
+
+/* Pages have been freed at this point */
+static void intel_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
+ intel_flush_svm_range(svm, start,
+ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+}
+
+
+static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
+{
+ struct qi_desc desc;
+
+ desc.high = 0;
+ desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
+
+ qi_submit_sync(&desc, svm->iommu);
+}
+
+static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
+ svm->iommu->pasid_table[svm->pasid].val = 0;
+
+ /* There's no need to do any flush because we can't get here if there
+ * are any devices left anyway. */
+ WARN_ON(!list_empty(&svm->devs));
+}
+
+static const struct mmu_notifier_ops intel_mmuops = {
+ .release = intel_mm_release,
+ .change_pte = intel_change_pte,
+ .invalidate_page = intel_invalidate_page,
+ .invalidate_range = intel_invalidate_range,
+};
+
+static DEFINE_MUTEX(pasid_mutex);
+
+int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
+{
+ struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_svm_dev *sdev;
+ struct intel_svm *svm = NULL;
+ struct mm_struct *mm = NULL;
+ int pasid_max;
+ int ret;
+
+ if (WARN_ON(!iommu))
+ return -EINVAL;
+
+ if (dev_is_pci(dev)) {
+ pasid_max = pci_max_pasids(to_pci_dev(dev));
+ if (pasid_max < 0)
+ return -EINVAL;
+ } else
+ pasid_max = 1 << 20;
+
+	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
+ if (!ecap_srs(iommu->ecap))
+ return -EINVAL;
+ } else if (pasid) {
+ mm = get_task_mm(current);
+ BUG_ON(!mm);
+ }
+
+ mutex_lock(&pasid_mutex);
+ if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
+ int i;
+
+ idr_for_each_entry(&iommu->pasid_idr, svm, i) {
+ if (svm->mm != mm ||
+ (svm->flags & SVM_FLAG_PRIVATE_PASID))
+ continue;
+
+ if (svm->pasid >= pasid_max) {
+ dev_warn(dev,
+ "Limited PASID width. Cannot use existing PASID %d\n",
+ svm->pasid);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ list_for_each_entry(sdev, &svm->devs, list) {
+ if (dev == sdev->dev) {
+ if (sdev->ops != ops) {
+ ret = -EBUSY;
+ goto out;
+ }
+ sdev->users++;
+ goto success;
+ }
+ }
+
+ break;
+ }
+ }
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ sdev->dev = dev;
+
+ ret = intel_iommu_enable_pasid(iommu, sdev);
+ if (ret || !pasid) {
+ /* If they don't actually want to assign a PASID, this is
+ * just an enabling check/preparation. */
+ kfree(sdev);
+ goto out;
+ }
+ /* Finish the setup now we know we're keeping it */
+ sdev->users = 1;
+ sdev->ops = ops;
+ init_rcu_head(&sdev->rcu);
+
+ if (!svm) {
+ svm = kzalloc(sizeof(*svm), GFP_KERNEL);
+ if (!svm) {
+ ret = -ENOMEM;
+ kfree(sdev);
+ goto out;
+ }
+ svm->iommu = iommu;
+
+ if (pasid_max > 2 << ecap_pss(iommu->ecap))
+ pasid_max = 2 << ecap_pss(iommu->ecap);
+
+ /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
+ ret = idr_alloc(&iommu->pasid_idr, svm,
+ !!cap_caching_mode(iommu->cap),
+ pasid_max - 1, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(svm);
+ goto out;
+ }
+ svm->pasid = ret;
+ svm->notifier.ops = &intel_mmuops;
+ svm->mm = mm;
+ svm->flags = flags;
+ INIT_LIST_HEAD_RCU(&svm->devs);
+ ret = -ENOMEM;
+ if (mm) {
+ ret = mmu_notifier_register(&svm->notifier, mm);
+ if (ret) {
+ idr_remove(&svm->iommu->pasid_idr, svm->pasid);
+ kfree(svm);
+ kfree(sdev);
+ goto out;
+ }
+ iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
+ mm = NULL;
+ } else
+ iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
+ wmb();
+ /* In caching mode, we still have to flush with PASID 0 when
+ * a PASID table entry becomes present. Not entirely clear
+ * *why* that would be the case — surely we could just issue
+ * a flush with the PASID value that we've changed? The PASID
+ * is the index into the table, after all. It's not like domain
+ * IDs in the case of the equivalent context-entry change in
+ * caching mode. And for that matter it's not entirely clear why
+ * a VMM would be in the business of caching the PASID table
+ * anyway. Surely that can be left entirely to the guest? */
+ if (cap_caching_mode(iommu->cap))
+ intel_flush_pasid_dev(svm, sdev, 0);
+ }
+ list_add_rcu(&sdev->list, &svm->devs);
+
+ success:
+ *pasid = svm->pasid;
+ ret = 0;
+ out:
+ mutex_unlock(&pasid_mutex);
+ if (mm)
+ mmput(mm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
+
+int intel_svm_unbind_mm(struct device *dev, int pasid)
+{
+ struct intel_svm_dev *sdev;
+ struct intel_iommu *iommu;
+ struct intel_svm *svm;
+ int ret = -EINVAL;
+
+ mutex_lock(&pasid_mutex);
+ iommu = intel_svm_device_to_iommu(dev);
+ if (!iommu || !iommu->pasid_table)
+ goto out;
+
+ svm = idr_find(&iommu->pasid_idr, pasid);
+ if (!svm)
+ goto out;
+
+ list_for_each_entry(sdev, &svm->devs, list) {
+ if (dev == sdev->dev) {
+ ret = 0;
+ sdev->users--;
+ if (!sdev->users) {
+ list_del_rcu(&sdev->list);
+ /* Flush the PASID cache and IOTLB for this device.
+ * Note that we do depend on the hardware *not* using
+ * the PASID any more. Just as we depend on other
+ * devices never using PASIDs that they have no right
+ * to use. We have a *shared* PASID table, because it's
+ * large and has to be physically contiguous. So it's
+ * hard to be as defensive as we might like. */
+ intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ kfree_rcu(sdev, rcu);
+
+ if (list_empty(&svm->devs)) {
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
+
+ idr_remove(&svm->iommu->pasid_idr, svm->pasid);
+ if (svm->mm)
+ mmput(svm->mm);
+ /* We mandate that no page faults may be outstanding
+ * for the PASID when intel_svm_unbind_mm() is called.
+ * If that is not obeyed, subtle errors will happen.
+ * Let's make them less subtle... */
+ memset(svm, 0x6b, sizeof(*svm));
+ kfree(svm);
+ }
+ }
+ break;
+ }
+ }
+ out:
+ mutex_unlock(&pasid_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
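(For context on how a device driver would consume the two exports above, a minimal hypothetical sketch follows. It is not part of the patch; error handling is trimmed, and how the returned PASID is programmed into the device is entirely device-specific:)

    /* Illustrative only: bind the current process's mm to 'dev' and get a PASID. */
    static int example_attach_svm(struct device *dev, int *out_pasid)
    {
            int pasid;
            int ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);

            if (ret)
                    return ret;     /* no SVM support, or no free PASIDs */

            /* Program 'pasid' into the device so that its DMA carries it. */
            *out_pasid = pasid;
            return 0;
    }

    /* All page requests for the PASID must have completed before unbinding. */
    static void example_detach_svm(struct device *dev, int pasid)
    {
            intel_svm_unbind_mm(dev, pasid);
    }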
+
+/* Page request queue descriptor */
+struct page_req_dsc {
+ u64 srr:1;
+ u64 bof:1;
+ u64 pasid_present:1;
+ u64 lpig:1;
+ u64 pasid:20;
+ u64 bus:8;
+ u64 private:23;
+ u64 prg_index:9;
+ u64 rd_req:1;
+ u64 wr_req:1;
+ u64 exe_req:1;
+ u64 priv_req:1;
+ u64 devfn:8;
+ u64 addr:52;
+};
+
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+static irqreturn_t prq_event_thread(int irq, void *d)
+{
+ struct intel_iommu *iommu = d;
+ struct intel_svm *svm = NULL;
+ int head, tail, handled = 0;
+
+ tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+ head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ while (head != tail) {
+ struct intel_svm_dev *sdev;
+ struct vm_area_struct *vma;
+ struct page_req_dsc *req;
+ struct qi_desc resp;
+ int ret, result;
+ u64 address;
+
+ handled = 1;
+
+ req = &iommu->prq[head / sizeof(*req)];
+
+ result = QI_RESP_FAILURE;
+ address = (u64)req->addr << VTD_PAGE_SHIFT;
+ if (!req->pasid_present) {
+ pr_err("%s: Page request without PASID: %08llx %08llx\n",
+ iommu->name, ((unsigned long long *)req)[0],
+ ((unsigned long long *)req)[1]);
+ goto bad_req;
+ }
+
+ if (!svm || svm->pasid != req->pasid) {
+ rcu_read_lock();
+ svm = idr_find(&iommu->pasid_idr, req->pasid);
+ /* It *can't* go away, because the driver is not permitted
+ * to unbind the mm while any page faults are outstanding.
+ * So we only need RCU to protect the internal idr code. */
+ rcu_read_unlock();
+
+ if (!svm) {
+ pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
+ iommu->name, req->pasid, ((unsigned long long *)req)[0],
+ ((unsigned long long *)req)[1]);
+ goto no_pasid;
+ }
+ }
+
+ result = QI_RESP_INVALID;
+ /* Since we're using init_mm.pgd directly, we should never take
+ * any faults on kernel addresses. */
+ if (!svm->mm)
+ goto bad_req;
+ down_read(&svm->mm->mmap_sem);
+ vma = find_extend_vma(svm->mm, address);
+ if (!vma || address < vma->vm_start)
+ goto invalid;
+
+ ret = handle_mm_fault(svm->mm, vma, address,
+ req->wr_req ? FAULT_FLAG_WRITE : 0);
+ if (ret & VM_FAULT_ERROR)
+ goto invalid;
+
+ result = QI_RESP_SUCCESS;
+ invalid:
+ up_read(&svm->mm->mmap_sem);
+ bad_req:
+ /* Accounting for major/minor faults? */
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
+ break;
+ }
+ /* Other devices can go away, but the drivers are not permitted
+ * to unbind while any page faults might be in flight. So it's
+ * OK to drop the 'lock' here now we have it. */
+ rcu_read_unlock();
+
+ if (WARN_ON(&sdev->list == &svm->devs))
+ sdev = NULL;
+
+ if (sdev && sdev->ops && sdev->ops->fault_cb) {
+ int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
+ (req->exe_req << 1) | (req->priv_req);
+ sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
+ }
+ /* We get here in the error case where the PASID lookup failed,
+ and these can be NULL. Do not use them below this point! */
+ sdev = NULL;
+ svm = NULL;
+ no_pasid:
+ if (req->lpig) {
+ /* Page Group Response */
+ resp.low = QI_PGRP_PASID(req->pasid) |
+ QI_PGRP_DID((req->bus << 8) | req->devfn) |
+ QI_PGRP_PASID_P(req->pasid_present) |
+ QI_PGRP_RESP_TYPE;
+ resp.high = QI_PGRP_IDX(req->prg_index) |
+ QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
+
+ qi_submit_sync(&resp, iommu);
+ } else if (req->srr) {
+ /* Page Stream Response */
+ resp.low = QI_PSTRM_IDX(req->prg_index) |
+ QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
+ QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
+ resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
+ QI_PSTRM_RESP_CODE(result);
+
+ qi_submit_sync(&resp, iommu);
+ }
+
+ head = (head + sizeof(*req)) & PRQ_RING_MASK;
+ }
+
+ dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
+
+ return IRQ_RETVAL(handled);
+}
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 9ec4e0d94ffd..1fae1881648c 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -169,8 +169,26 @@ static int modify_irte(struct irq_2_iommu *irq_iommu,
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
- set_64bit(&irte->low, irte_modified->low);
- set_64bit(&irte->high, irte_modified->high);
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
+ if ((irte->pst == 1) || (irte_modified->pst == 1)) {
+ bool ret;
+
+ ret = cmpxchg_double(&irte->low, &irte->high,
+ irte->low, irte->high,
+ irte_modified->low, irte_modified->high);
+ /*
+ * We use cmpxchg16 to atomically update the 128-bit IRTE,
+ * and it cannot be updated by the hardware or other processors
+ * behind us, so the return value of cmpxchg16 should be the
+ * same as the old value.
+ */
+ WARN_ON(!ret);
+ } else
+#endif
+ {
+ set_64bit(&irte->low, irte_modified->low);
+ set_64bit(&irte->high, irte_modified->high);
+ }
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
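(For readers unfamiliar with cmpxchg_double() used in the hunk above: conceptually it updates two adjacent 64-bit words as one atomic 128-bit operation, cmpxchg16b on x86, and reports whether the old values still matched. A rough, deliberately non-atomic model of its semantics, purely for illustration:)

    /* Sketch of what cmpxchg_double(p1, p2, o1, o2, n1, n2) does, minus the
     * atomicity that the real primitive provides in hardware. */
    static bool cmpxchg_double_model(u64 *p1, u64 *p2, u64 o1, u64 o2,
                                     u64 n1, u64 n2)
    {
            if (*p1 == o1 && *p2 == o2) {
                    *p1 = n1;
                    *p2 = n2;
                    return true;
            }
            return false;
    }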
@@ -384,7 +402,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
- struct irte __iomem *old_ir_table;
+ struct irte *old_ir_table;
phys_addr_t irt_phys;
unsigned int i;
size_t size;
@@ -408,12 +426,12 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
size = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);
/* Map the old IR table */
- old_ir_table = ioremap_cache(irt_phys, size);
+ old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
if (!old_ir_table)
return -ENOMEM;
/* Copy data over */
- memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
+ memcpy(iommu->ir_table->base, old_ir_table, size);
__iommu_flush_cache(iommu, iommu->ir_table->base, size);
@@ -426,7 +444,7 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
bitmap_set(iommu->ir_table->bitmap, i, 1);
}
- iounmap(old_ir_table);
+ memunmap(old_ir_table);
return 0;
}
@@ -672,7 +690,7 @@ static int __init intel_prepare_irq_remapping(void)
if (!dmar_ir_support())
return -ENODEV;
- if (parse_ioapics_under_ir() != 1) {
+ if (parse_ioapics_under_ir()) {
pr_info("Not enabling interrupt remapping\n");
goto error;
}
@@ -727,7 +745,16 @@ static inline void set_irq_posting_cap(void)
struct intel_iommu *iommu;
if (!disable_irq_post) {
- intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
+ /*
+ * If IRTE is in posted format, the 'pda' field goes across the
+ * 64-bit boundary, we need use cmpxchg16b to atomically update
+ * it. We only expose posted-interrupt when X86_FEATURE_CX16
+ * is supported. Actually, hardware platforms supporting PI
+ * should have X86_FEATURE_CX16 support, this has been confirmed
+ * with Intel hardware guys.
+ */
+		if (cpu_has_cx16)
+ intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
for_each_iommu(iommu, drhd)
if (!cap_pi_support(iommu->cap)) {
@@ -907,16 +934,21 @@ static int __init parse_ioapics_under_ir(void)
bool ir_supported = false;
int ioapic_idx;
- for_each_iommu(iommu, drhd)
- if (ecap_ir_support(iommu->ecap)) {
- if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
- return -1;
+ for_each_iommu(iommu, drhd) {
+ int ret;
- ir_supported = true;
- }
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
+ if (ret)
+ return ret;
+
+ ir_supported = true;
+ }
if (!ir_supported)
- return 0;
+ return -ENODEV;
for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
int ioapic_id = mpc_ioapic_id(ioapic_idx);
@@ -928,7 +960,7 @@ static int __init parse_ioapics_under_ir(void)
}
}
- return 1;
+ return 0;
}
static int __init ir_dev_scope_init(void)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 049df495c274..abae363c7b9b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -728,16 +728,35 @@ static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
}
/*
+ * Generic device_group call-back function. It just allocates one
+ * iommu-group per device.
+ */
+struct iommu_group *generic_device_group(struct device *dev)
+{
+ struct iommu_group *group;
+
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return NULL;
+
+ return group;
+}
+
+/*
* Use standard PCI bus topology, isolation features, and DMA alias quirks
* to find or create an IOMMU group for a device.
*/
-static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
+struct iommu_group *pci_device_group(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct group_for_pci_data data;
struct pci_bus *bus;
struct iommu_group *group = NULL;
u64 devfns[4] = { 0 };
+ if (WARN_ON(!dev_is_pci(dev)))
+ return ERR_PTR(-EINVAL);
+
/*
* Find the upstream DMA alias for the device. A device must not
* be aliased due to topology in order to have its own IOMMU group.
@@ -791,14 +810,6 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
if (IS_ERR(group))
return NULL;
- /*
- * Try to allocate a default domain - needs support from the
- * IOMMU driver.
- */
- group->default_domain = __iommu_domain_alloc(pdev->dev.bus,
- IOMMU_DOMAIN_DMA);
- group->domain = group->default_domain;
-
return group;
}
@@ -814,6 +825,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
*/
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
int ret;
@@ -821,14 +833,24 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (group)
return group;
- if (!dev_is_pci(dev))
- return ERR_PTR(-EINVAL);
+ group = ERR_PTR(-EINVAL);
- group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
+ if (ops && ops->device_group)
+ group = ops->device_group(dev);
if (IS_ERR(group))
return group;
+ /*
+ * Try to allocate a default domain - needs support from the
+ * IOMMU driver.
+ */
+ if (!group->default_domain) {
+ group->default_domain = __iommu_domain_alloc(dev->bus,
+ IOMMU_DOMAIN_DMA);
+ group->domain = group->default_domain;
+ }
+
ret = iommu_group_add_device(group, dev);
if (ret) {
iommu_group_put(group);
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 913455a5fd40..8adaaeae3268 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -22,7 +22,7 @@ int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;
-int disable_irq_post = 1;
+int disable_irq_post = 0;
static int disable_irq_remap;
static struct irq_remap_ops *remap_ops;
@@ -58,14 +58,18 @@ static __init int setup_irqremap(char *str)
return -EINVAL;
while (*str) {
- if (!strncmp(str, "on", 2))
+ if (!strncmp(str, "on", 2)) {
disable_irq_remap = 0;
- else if (!strncmp(str, "off", 3))
+ disable_irq_post = 0;
+ } else if (!strncmp(str, "off", 3)) {
disable_irq_remap = 1;
- else if (!strncmp(str, "nosid", 5))
+ disable_irq_post = 1;
+ } else if (!strncmp(str, "nosid", 5))
disable_sourceid_checking = 1;
else if (!strncmp(str, "no_x2apic_optout", 16))
no_x2apic_optout = 1;
+ else if (!strncmp(str, "nopost", 6))
+ disable_irq_post = 1;
str += strcspn(str, ",");
while (*str == ',')
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 36d0033c2ccb..3dc5b65f3990 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -26,6 +26,8 @@
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <asm/cacheflush.h>
@@ -112,6 +114,18 @@ void omap_iommu_restore_ctx(struct device *dev)
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
+static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
+{
+ u32 val, mask;
+
+ if (!obj->syscfg)
+ return;
+
+ mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
+ val = enable ? mask : 0;
+ regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
+}
+
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
u32 l = iommu_read_reg(obj, MMU_CNTL);
@@ -147,6 +161,8 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
iommu_write_reg(obj, pa, MMU_TTB);
+ dra7_cfg_dspsys_mmu(obj, true);
+
if (obj->has_bus_err_back)
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
@@ -161,6 +177,7 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
+ dra7_cfg_dspsys_mmu(obj, false);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
@@ -864,6 +881,42 @@ static void omap_iommu_detach(struct omap_iommu *obj)
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
+static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
+ struct omap_iommu *obj)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
+ return 0;
+
+ if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
+ dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
+ return -EINVAL;
+ }
+
+ obj->syscfg =
+ syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
+ if (IS_ERR(obj->syscfg)) {
+ /* can fail with -EPROBE_DEFER */
+ ret = PTR_ERR(obj->syscfg);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
+ &obj->id)) {
+ dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
+ return -EINVAL;
+ }
+
+ if (obj->id != 0 && obj->id != 1) {
+ dev_err(&pdev->dev, "invalid IOMMU instance id\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* OMAP Device MMU(IOMMU) detection
*/
@@ -907,6 +960,10 @@ static int omap_iommu_probe(struct platform_device *pdev)
if (IS_ERR(obj->regbase))
return PTR_ERR(obj->regbase);
+ err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
+ if (err)
+ return err;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
@@ -943,6 +1000,7 @@ static const struct of_device_id omap_iommu_of_match[] = {
{ .compatible = "ti,omap2-iommu" },
{ .compatible = "ti,omap4-iommu" },
{ .compatible = "ti,dra7-iommu" },
+ { .compatible = "ti,dra7-dsp-iommu" },
{},
};
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index a656df2f9e03..59628e5017b4 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -30,6 +30,7 @@ struct iotlb_entry {
struct omap_iommu {
const char *name;
void __iomem *regbase;
+ struct regmap *syscfg;
struct device *dev;
struct iommu_domain *domain;
struct dentry *debug_dir;
@@ -48,6 +49,7 @@ struct omap_iommu {
void *ctx; /* iommu context: registres saved area */
int has_bus_err_back;
+ u32 id;
};
struct cr_regs {
@@ -159,6 +161,13 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/*
+ * DSP_SYSTEM registers and bit definitions (applicable only for DRA7xx DSP)
+ */
+#define DSP_SYS_REVISION 0x00
+#define DSP_SYS_MMU_CONFIG 0x18
+#define DSP_SYS_MMU_CONFIG_EN_SHIFT 4
+
+/*
* utilities for super page(16MB, 1MB, 64KB and 4KB)
*/
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
new file mode 100644
index 000000000000..cbe198cb3699
--- /dev/null
+++ b/drivers/iommu/s390-iommu.c
@@ -0,0 +1,337 @@
+/*
+ * IOMMU API for s390 PCI devices
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/iommu.h>
+#include <linux/iommu-helper.h>
+#include <linux/sizes.h>
+#include <asm/pci_dma.h>
+
+/*
+ * Physically contiguous memory regions can be mapped with 4 KiB alignment;
+ * we allow every power-of-two page size of 4 KiB or larger (no special
+ * large-page support so far).
+ */
+#define S390_IOMMU_PGSIZES (~0xFFFUL)
+
+struct s390_domain {
+ struct iommu_domain domain;
+ struct list_head devices;
+ unsigned long *dma_table;
+ spinlock_t dma_table_lock;
+ spinlock_t list_lock;
+};
+
+struct s390_domain_device {
+ struct list_head list;
+ struct zpci_dev *zdev;
+};
+
+static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct s390_domain, domain);
+}
+
+static bool s390_iommu_capable(enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+ case IOMMU_CAP_INTR_REMAP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+{
+ struct s390_domain *s390_domain;
+
+ if (domain_type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
+ if (!s390_domain)
+ return NULL;
+
+ s390_domain->dma_table = dma_alloc_cpu_table();
+ if (!s390_domain->dma_table) {
+ kfree(s390_domain);
+ return NULL;
+ }
+
+ spin_lock_init(&s390_domain->dma_table_lock);
+ spin_lock_init(&s390_domain->list_lock);
+ INIT_LIST_HEAD(&s390_domain->devices);
+
+ return &s390_domain->domain;
+}
+
+void s390_domain_free(struct iommu_domain *domain)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+
+ dma_cleanup_tables(s390_domain->dma_table);
+ kfree(s390_domain);
+}
+
+static int s390_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct s390_domain_device *domain_device;
+ unsigned long flags;
+ int rc;
+
+ if (!zdev)
+ return -ENODEV;
+
+ domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
+ if (!domain_device)
+ return -ENOMEM;
+
+ if (zdev->dma_table)
+ zpci_dma_exit_device(zdev);
+
+ zdev->dma_table = s390_domain->dma_table;
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
+ zdev->start_dma + zdev->iommu_size - 1,
+ (u64) zdev->dma_table);
+ if (rc)
+ goto out_restore;
+
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ /* First device defines the DMA range limits */
+ if (list_empty(&s390_domain->devices)) {
+ domain->geometry.aperture_start = zdev->start_dma;
+ domain->geometry.aperture_end = zdev->end_dma;
+ domain->geometry.force_aperture = true;
+ /* Allow only devices with identical DMA range limits */
+ } else if (domain->geometry.aperture_start != zdev->start_dma ||
+ domain->geometry.aperture_end != zdev->end_dma) {
+ rc = -EINVAL;
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ goto out_restore;
+ }
+ domain_device->zdev = zdev;
+ zdev->s390_domain = s390_domain;
+ list_add(&domain_device->list, &s390_domain->devices);
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+
+ return 0;
+
+out_restore:
+ zpci_dma_init_device(zdev);
+ kfree(domain_device);
+
+ return rc;
+}
+
+static void s390_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct s390_domain_device *domain_device, *tmp;
+ unsigned long flags;
+ int found = 0;
+
+ if (!zdev)
+ return;
+
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
+ list) {
+ if (domain_device->zdev == zdev) {
+ list_del(&domain_device->list);
+ kfree(domain_device);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+
+ if (found) {
+ zdev->s390_domain = NULL;
+ zpci_unregister_ioat(zdev, 0);
+ zpci_dma_init_device(zdev);
+ }
+}
+
+static int s390_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int rc;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+ }
+
+ rc = iommu_group_add_device(group, dev);
+ iommu_group_put(group);
+
+ return rc;
+}
+
+static void s390_iommu_remove_device(struct device *dev)
+{
+ struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct iommu_domain *domain;
+
+ /*
+ * This is a workaround for a scenario where the IOMMU API common code
+ * "forgets" to call the detach_dev callback: After binding a device
+ * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
+ * the attach_dev), removing the device via
+ * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
+ * only remove_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
+ * notifier.
+ *
+ * So let's call detach_dev from here if it hasn't been called before.
+ */
+ if (zdev && zdev->s390_domain) {
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain)
+ s390_iommu_detach_device(domain, dev);
+ }
+
+ iommu_group_remove_device(dev);
+}
+
+static int s390_iommu_update_trans(struct s390_domain *s390_domain,
+ unsigned long pa, dma_addr_t dma_addr,
+ size_t size, int flags)
+{
+ struct s390_domain_device *domain_device;
+ u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+ dma_addr_t start_dma_addr = dma_addr;
+ unsigned long irq_flags, nr_pages, i;
+ int rc = 0;
+
+ if (dma_addr < s390_domain->domain.geometry.aperture_start ||
+ dma_addr + size > s390_domain->domain.geometry.aperture_end)
+ return -EINVAL;
+
+ nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ if (!nr_pages)
+ return 0;
+
+ spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
+ for (i = 0; i < nr_pages; i++) {
+ dma_update_cpu_trans(s390_domain->dma_table, page_addr,
+ dma_addr, flags);
+ page_addr += PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ }
+
+ spin_lock(&s390_domain->list_lock);
+ list_for_each_entry(domain_device, &s390_domain->devices, list) {
+ rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
+ start_dma_addr, nr_pages * PAGE_SIZE);
+ if (rc)
+ break;
+ }
+ spin_unlock(&s390_domain->list_lock);
+ spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
+
+ return rc;
+}
+
+static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ int flags = ZPCI_PTE_VALID, rc = 0;
+
+ if (!(prot & IOMMU_READ))
+ return -EINVAL;
+
+ if (!(prot & IOMMU_WRITE))
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
+ size, flags);
+
+ return rc;
+}
+
+static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ unsigned long *sto, *pto, *rto, flags;
+ unsigned int rtx, sx, px;
+ phys_addr_t phys = 0;
+
+ if (iova < domain->geometry.aperture_start ||
+ iova > domain->geometry.aperture_end)
+ return 0;
+
+ rtx = calc_rtx(iova);
+ sx = calc_sx(iova);
+ px = calc_px(iova);
+ rto = s390_domain->dma_table;
+
+ spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
+ if (rto && reg_entry_isvalid(rto[rtx])) {
+ sto = get_rt_sto(rto[rtx]);
+ if (sto && reg_entry_isvalid(sto[sx])) {
+ pto = get_st_pto(sto[sx]);
+ if (pto && pt_entry_isvalid(pto[px]))
+ phys = pto[px] & ZPCI_PTE_ADDR_MASK;
+ }
+ }
+ spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);
+
+ return phys;
+}
+
+static size_t s390_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ int flags = ZPCI_PTE_INVALID;
+ phys_addr_t paddr;
+ int rc;
+
+ paddr = s390_iommu_iova_to_phys(domain, iova);
+ if (!paddr)
+ return 0;
+
+ rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
+ size, flags);
+ if (rc)
+ return 0;
+
+ return size;
+}
+
+static struct iommu_ops s390_iommu_ops = {
+ .capable = s390_iommu_capable,
+ .domain_alloc = s390_domain_alloc,
+ .domain_free = s390_domain_free,
+ .attach_dev = s390_iommu_attach_device,
+ .detach_dev = s390_iommu_detach_device,
+ .map = s390_iommu_map,
+ .unmap = s390_iommu_unmap,
+ .iova_to_phys = s390_iommu_iova_to_phys,
+ .add_device = s390_iommu_add_device,
+ .remove_device = s390_iommu_remove_device,
+ .pgsize_bitmap = S390_IOMMU_PGSIZES,
+};
+
+static int __init s390_iommu_init(void)
+{
+ return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
+}
+subsys_initcall(s390_iommu_init);
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index db3ba8b42517..314159610d24 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -122,7 +122,6 @@ static struct spi_driver dac124s085_driver = {
.remove = dac124s085_remove,
.driver = {
.name = "dac124s085",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index 833191bcd810..ccc1f43cb9a9 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -32,9 +32,9 @@
#include <linux/time.h>
#include <linux/dvb/dmx.h>
-/*--------------------------------------------------------------------------*/
-/* Common definitions */
-/*--------------------------------------------------------------------------*/
+/*
+ * Common definitions
+ */
/*
* DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter.
@@ -45,7 +45,8 @@
#endif
/*
- * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed filter.
+ * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed
+ * filter.
*/
#ifndef DMX_MAX_SECTION_SIZE
@@ -55,139 +56,296 @@
#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188)
#endif
-
/*
- * enum dmx_success: Success codes for the Demux Callback API.
+ * TS packet reception
*/
-enum dmx_success {
- DMX_OK = 0, /* Received Ok */
- DMX_LENGTH_ERROR, /* Incorrect length */
- DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
- DMX_CRC_ERROR, /* Incorrect CRC */
- DMX_FRAME_ERROR, /* Frame alignment error */
- DMX_FIFO_ERROR, /* Receiver FIFO overrun */
- DMX_MISSED_ERROR /* Receiver missed packet */
-} ;
-
-/*--------------------------------------------------------------------------*/
-/* TS packet reception */
-/*--------------------------------------------------------------------------*/
-
-/* TS filter type for set() */
-
-#define TS_PACKET 1 /* send TS packets (188 bytes) to callback (default) */
-#define TS_PAYLOAD_ONLY 2 /* in case TS_PACKET is set, only send the TS
- payload (<=184 bytes per packet) to callback */
-#define TS_DECODER 4 /* send stream to built-in decoder (if present) */
-#define TS_DEMUX 8 /* in case TS_PACKET is set, send the TS to
- the demux device, not to the dvr device */
+/**
+ * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set()
+ *
+ * @TS_PACKET: Send TS packets (188 bytes) to callback (default).
+ * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload
+ * (<=184 bytes per packet) to callback
+ * @TS_DECODER: Send stream to built-in decoder (if present).
+ * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux
+ * device, not to the dvr device
+ */
+enum ts_filter_type {
+ TS_PACKET = 1,
+ TS_PAYLOAD_ONLY = 2,
+ TS_DECODER = 4,
+ TS_DEMUX = 8,
+};
+/**
+ * struct dmx_ts_feed - Structure that contains a TS feed filter
+ *
+ * @is_filtering: Set to non-zero when filtering in progress
+ * @parent: pointer to struct dmx_demux
+ * @priv: pointer to private data of the API client
+ * @set: sets the TS filter
+ * @start_filtering: starts TS filtering
+ * @stop_filtering: stops TS filtering
+ *
+ * A TS feed is typically mapped to a hardware PID filter on the demux chip.
+ * Using this API, the client can set the filtering properties to start/stop
+ * filtering TS packets on a particular TS feed.
+ */
struct dmx_ts_feed {
- int is_filtering; /* Set to non-zero when filtering in progress */
- struct dmx_demux *parent; /* Back-pointer */
- void *priv; /* Pointer to private data of the API client */
- int (*set) (struct dmx_ts_feed *feed,
- u16 pid,
- int type,
- enum dmx_ts_pes pes_type,
- size_t circular_buffer_size,
- struct timespec timeout);
- int (*start_filtering) (struct dmx_ts_feed* feed);
- int (*stop_filtering) (struct dmx_ts_feed* feed);
+ int is_filtering;
+ struct dmx_demux *parent;
+ void *priv;
+ int (*set)(struct dmx_ts_feed *feed,
+ u16 pid,
+ int type,
+ enum dmx_ts_pes pes_type,
+ size_t circular_buffer_size,
+ struct timespec timeout);
+ int (*start_filtering)(struct dmx_ts_feed *feed);
+ int (*stop_filtering)(struct dmx_ts_feed *feed);
};
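(A rough usage sketch of the ops above; it is hypothetical and not part of this patch. The feed is assumed to have already been obtained from the demux's allocate_ts_feed(), and the PID, buffer size and timeout values are arbitrary:)

    /* Illustrative only: start payload-only filtering of PID 0x100. */
    static int example_start_ts_feed(struct dmx_ts_feed *feed)
    {
            struct timespec timeout = { 0, 0 };     /* no timeout */
            int ret;

            ret = feed->set(feed, 0x100, TS_PACKET | TS_PAYLOAD_ONLY,
                            DMX_PES_OTHER, 32 * 1024, timeout);
            if (ret)
                    return ret;

            return feed->start_filtering(feed);
    }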
-/*--------------------------------------------------------------------------*/
-/* Section reception */
-/*--------------------------------------------------------------------------*/
+/*
+ * Section reception
+ */
+/**
+ * struct dmx_section_filter - Structure that describes a section filter
+ *
+ * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header
+ * that will be matched by the section filter
+ * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits
+ * specified by @filter_value that will be used on the filter
+ * match logic.
+ * @filter_mode: Contains a 16 bytes (128 bits) filter mode.
+ * @parent: Pointer to struct dmx_section_feed.
+ * @priv: Pointer to private data of the API client.
+ *
+ *
+ * The @filter_mask controls which bits of @filter_value are compared with
+ * the section headers/payload. On a binary value of 1 in filter_mask, the
+ * corresponding bits are compared. The filter only accepts sections that are
+ * equal to filter_value in all the tested bit positions.
+ */
struct dmx_section_filter {
- u8 filter_value [DMX_MAX_FILTER_SIZE];
- u8 filter_mask [DMX_MAX_FILTER_SIZE];
- u8 filter_mode [DMX_MAX_FILTER_SIZE];
- struct dmx_section_feed* parent; /* Back-pointer */
- void* priv; /* Pointer to private data of the API client */
+ u8 filter_value[DMX_MAX_FILTER_SIZE];
+ u8 filter_mask[DMX_MAX_FILTER_SIZE];
+ u8 filter_mode[DMX_MAX_FILTER_SIZE];
+ struct dmx_section_feed *parent; /* Back-pointer */
+ void *priv; /* Pointer to private data of the API client */
};
+/**
+ * struct dmx_section_feed - Structure that contains a section feed filter
+ *
+ * @is_filtering: Set to non-zero when filtering in progress
+ * @parent: pointer to struct dmx_demux
+ * @priv: pointer to private data of the API client
+ * @check_crc: If non-zero, check the CRC values of filtered sections.
+ * @set: sets the section filter
+ * @allocate_filter: This function is used to allocate a section filter on
+ * the demux. It should only be called when no filtering
+ * is in progress on this section feed. If a filter cannot
+ * be allocated, the function fails with -ENOSPC.
+ * @release_filter: This function releases all the resources of a
+ * previously allocated section filter. The function
+ * should not be called while filtering is in progress
+ * on this section feed. After calling this function,
+ * the caller should not try to dereference the filter
+ * pointer.
+ * @start_filtering: starts section filtering
+ * @stop_filtering: stops section filtering
+ *
+ * A section feed is typically mapped to a hardware section filter on the
+ * demux chip. Using this API, the client can set the filtering properties
+ * to start/stop filtering sections on a particular section feed.
+ */
struct dmx_section_feed {
- int is_filtering; /* Set to non-zero when filtering in progress */
- struct dmx_demux* parent; /* Back-pointer */
- void* priv; /* Pointer to private data of the API client */
+ int is_filtering;
+ struct dmx_demux *parent;
+ void *priv;
int check_crc;
+
+ /* private: Used internally at dvb_demux.c */
u32 crc_val;
u8 *secbuf;
u8 secbuf_base[DMX_MAX_SECFEED_SIZE];
u16 secbufp, seclen, tsfeedp;
- int (*set) (struct dmx_section_feed* feed,
- u16 pid,
- size_t circular_buffer_size,
- int check_crc);
- int (*allocate_filter) (struct dmx_section_feed* feed,
- struct dmx_section_filter** filter);
- int (*release_filter) (struct dmx_section_feed* feed,
- struct dmx_section_filter* filter);
- int (*start_filtering) (struct dmx_section_feed* feed);
- int (*stop_filtering) (struct dmx_section_feed* feed);
+ /* public: */
+ int (*set)(struct dmx_section_feed *feed,
+ u16 pid,
+ size_t circular_buffer_size,
+ int check_crc);
+ int (*allocate_filter)(struct dmx_section_feed *feed,
+ struct dmx_section_filter **filter);
+ int (*release_filter)(struct dmx_section_feed *feed,
+ struct dmx_section_filter *filter);
+ int (*start_filtering)(struct dmx_section_feed *feed);
+ int (*stop_filtering)(struct dmx_section_feed *feed);
};
-/*--------------------------------------------------------------------------*/
-/* Callback functions */
-/*--------------------------------------------------------------------------*/
-
-typedef int (*dmx_ts_cb) ( const u8 * buffer1,
- size_t buffer1_length,
- const u8 * buffer2,
- size_t buffer2_length,
- struct dmx_ts_feed* source,
- enum dmx_success success);
+/*
+ * Callback functions
+ */
-typedef int (*dmx_section_cb) ( const u8 * buffer1,
- size_t buffer1_len,
- const u8 * buffer2,
- size_t buffer2_len,
- struct dmx_section_filter * source,
- enum dmx_success success);
+/**
+ * typedef dmx_ts_cb - DVB demux TS filter callback function prototype
+ *
+ * @buffer1: Pointer to the start of the filtered TS packets.
+ * @buffer1_length: Length of the TS data in buffer1.
+ * @buffer2: Pointer to the tail of the filtered TS packets, or NULL.
+ * @buffer2_length: Length of the TS data in buffer2.
+ * @source: Indicates which TS feed is the source of the callback.
+ *
+ * This function callback prototype, provided by the client of the demux API,
+ * is called from the demux code. The function is only called when filtering
+ * on a TS feed has been enabled using the start_filtering() function at
+ * the &dmx_demux.
+ * Any TS packets that match the filter settings are copied to a circular
+ * buffer. The filtered TS packets are delivered to the client using this
+ * callback function. The size of the circular buffer is controlled by the
+ * circular_buffer_size parameter of the &dmx_ts_feed.@set function.
+ * It is expected that the @buffer1 and @buffer2 callback parameters point to
+ * addresses within the circular buffer, but other implementations are also
+ * possible. Note that the called party should not try to free the memory
+ * the @buffer1 and @buffer2 parameters point to.
+ *
+ * When this function is called, the @buffer1 parameter typically points to
+ * the start of the first undelivered TS packet within a circular buffer.
+ * The @buffer2 buffer parameter is normally NULL, except when the received
+ * TS packets have crossed the last address of the circular buffer and
+ * "wrapped" to the beginning of the buffer. In the latter case the @buffer1
+ * parameter would contain an address within the circular buffer, while the
+ * @buffer2 parameter would contain the first address of the circular buffer.
+ * The number of bytes delivered with this function (i.e. @buffer1_length +
+ * @buffer2_length) is usually equal to the value of callback_length parameter
+ * given in the set() function, with one exception: if a timeout occurs before
+ * receiving callback_length bytes of TS data, any undelivered packets are
+ * immediately delivered to the client by calling this function. The timeout
+ * duration is controlled by the set() function in the TS Feed API.
+ *
+ * If a TS packet is received with errors that could not be fixed by the
+ * TS-level forward error correction (FEC), the Transport_error_indicator
+ * flag of the TS packet header should be set. The TS packet should not be
+ * discarded, as the error can possibly be corrected by a higher layer
+ * protocol. If the called party is slow in processing the callback, it
+ * is possible that the circular buffer eventually fills up. If this happens,
+ * the demux driver should discard any TS packets received while the buffer
+ * is full and return -EOVERFLOW.
+ *
+ * The type of data returned to the callback can be selected by the
+ * &dmx_ts_feed.@set function. The type parameter decides if the raw
+ * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY)
+ * should be returned. If additionally the TS_DECODER bit is set the stream
+ * will also be sent to the hardware MPEG decoder.
+ *
+ * Return:
+ * 0, on success;
+ * -EOVERFLOW, on buffer overflow.
+ */
+typedef int (*dmx_ts_cb)(const u8 *buffer1,
+ size_t buffer1_length,
+ const u8 *buffer2,
+ size_t buffer2_length,
+ struct dmx_ts_feed *source);
+
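(To make the buffer1/buffer2 wrap-around convention described above concrete, a minimal hypothetical TS callback is sketched below. The client-side buffer is invented for the example; a real client would of course do something more useful with the filtered data:)

    /* Illustrative only: buffer2 is non-NULL only when the filtered data has
     * wrapped past the end of the demux driver's circular buffer. */
    static u8 example_buf[64 * 1024];
    static size_t example_used;

    static void example_consume(const u8 *data, size_t len)
    {
            if (example_used + len > sizeof(example_buf))
                    len = sizeof(example_buf) - example_used;
            memcpy(example_buf + example_used, data, len);
            example_used += len;
    }

    static int example_ts_cb(const u8 *buffer1, size_t buffer1_length,
                             const u8 *buffer2, size_t buffer2_length,
                             struct dmx_ts_feed *source)
    {
            example_consume(buffer1, buffer1_length);
            if (buffer2)
                    example_consume(buffer2, buffer2_length);
            return 0;
    }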
+/**
+ * typedef dmx_section_cb - DVB demux section filter callback function prototype
+ *
+ * @buffer1: Pointer to the start of the filtered section, e.g.
+ * within the circular buffer of the demux driver.
+ * @buffer1_len: Length of the filtered section data in @buffer1,
+ * including headers and CRC.
+ * @buffer2: Pointer to the tail of the filtered section data,
+ * or NULL. Useful to handle the wrapping of a
+ * circular buffer.
+ * @buffer2_len: Length of the filtered section data in @buffer2,
+ * including headers and CRC.
+ * @source: Indicates which section feed is the source of the
+ * callback.
+ *
+ * This function callback prototype, provided by the client of the demux API,
+ * is called from the demux code. The function is only called when
+ * filtering of sections has been enabled using the function
+ * &dmx_ts_feed.@start_filtering. When the demux driver has received a
+ * complete section that matches at least one section filter, the client
+ * is notified via this callback function. Normally this function is called
+ * for each received section; however, it is also possible to deliver
+ * multiple sections with one callback, for example when the system load
+ * is high. If an error occurs while receiving a section, this
+ * function should be called with the corresponding error type set in the
+ * success field, whether or not there is data to deliver. The Section Feed
+ * implementation should maintain a circular buffer for received sections.
+ * However, this is not necessary if the Section Feed API is implemented as
+ * a client of the TS Feed API, because the TS Feed implementation then
+ * buffers the received data. The size of the circular buffer can be
+ * configured using the &dmx_ts_feed.@set function in the Section Feed API.
+ * If there is no room in the circular buffer when a new section is received,
+ * the section must be discarded. If this happens, the value of the success
+ * parameter should be DMX_OVERRUN_ERROR on the next callback.
+ */
+typedef int (*dmx_section_cb)(const u8 *buffer1,
+ size_t buffer1_len,
+ const u8 *buffer2,
+ size_t buffer2_len,
+ struct dmx_section_filter *source);
/*--------------------------------------------------------------------------*/
/* DVB Front-End */
/*--------------------------------------------------------------------------*/
+/**
+ * enum dmx_frontend_source - Used to identify the type of frontend
+ *
+ * @DMX_MEMORY_FE: The source of the demux is memory. It means that
+ * the MPEG-TS to be filtered comes from userspace,
+ * via write() syscall.
+ *
+ * @DMX_FRONTEND_0: The source of the demux is a frontend connected
+ * to the demux.
+ */
enum dmx_frontend_source {
DMX_MEMORY_FE,
DMX_FRONTEND_0,
- DMX_FRONTEND_1,
- DMX_FRONTEND_2,
- DMX_FRONTEND_3,
- DMX_STREAM_0, /* external stream input, e.g. LVDS */
- DMX_STREAM_1,
- DMX_STREAM_2,
- DMX_STREAM_3
};
+/**
+ * struct dmx_frontend - Structure that lists the frontends associated with
+ * a demux
+ *
+ * @connectivity_list: List of front-ends that can be connected to a
+ * particular demux;
+ * @source: Type of the frontend.
+ *
+ * FIXME: this structure should likely be replaced soon by some
+ * media-controller based logic.
+ */
struct dmx_frontend {
- struct list_head connectivity_list; /* List of front-ends that can
- be connected to a particular
- demux */
+ struct list_head connectivity_list;
enum dmx_frontend_source source;
};
-/*--------------------------------------------------------------------------*/
-/* MPEG-2 TS Demux */
-/*--------------------------------------------------------------------------*/
-
/*
- * Flags OR'ed in the capabilities field of struct dmx_demux.
+ * MPEG-2 TS Demux
*/
-#define DMX_TS_FILTERING 1
-#define DMX_PES_FILTERING 2
-#define DMX_SECTION_FILTERING 4
-#define DMX_MEMORY_BASED_FILTERING 8 /* write() available */
-#define DMX_CRC_CHECKING 16
-#define DMX_TS_DESCRAMBLING 32
+/**
+ * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap
+ *
+ * @DMX_TS_FILTERING: set if TS filtering is supported;
+ * @DMX_SECTION_FILTERING: set if section filtering is supported;
+ * @DMX_MEMORY_BASED_FILTERING: set if write() available.
+ *
+ * Those flags are OR'ed in the &dmx_demux.@capabilities field
+ */
+enum dmx_demux_caps {
+ DMX_TS_FILTERING = 1,
+ DMX_SECTION_FILTERING = 4,
+ DMX_MEMORY_BASED_FILTERING = 8,
+};
/*
* Demux resource type identifier.
@@ -200,42 +358,241 @@ struct dmx_frontend {
*.
*/
-#define DMX_FE_ENTRY(list) list_entry(list, struct dmx_frontend, connectivity_list)
+#define DMX_FE_ENTRY(list) \
+ list_entry(list, struct dmx_frontend, connectivity_list)
+
+/**
+ * struct dmx_demux - Structure that contains the demux capabilities and
+ * callbacks.
+ *
+ * @capabilities: Bitfield of capability flags.
+ *
+ * @frontend: Front-end connected to the demux
+ *
+ * @priv: Pointer to private data of the API client
+ *
+ * @open: This function reserves the demux for use by the caller and, if
+ * necessary, initializes the demux. When the demux is no longer needed,
+ * the function @close should be called. It should be possible for
+ * multiple clients to access the demux at the same time. Thus, the
+ * function implementation should increment the demux usage count when
+ * @open is called and decrement it when @close is called.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -EUSERS, if maximum usage count was reached;
+ * -EINVAL, on bad parameter.
+ *
+ * @close: This function releases the demux previously reserved with @open.
+ *	Since multiple clients may access the demux at the same time, the
+ *	implementation should decrement the usage count that was incremented
+ *	by @open, de-initializing the demux once the last user is gone.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -ENODEV, if demux was not in use (e. g. no users);
+ * -EINVAL, on bad parameter.
+ *
+ * @write: This function provides the demux driver with a memory buffer
+ * containing TS packets. Instead of receiving TS packets from the DVB
+ * front-end, the demux driver software will read packets from memory.
+ * Any clients of this demux with active TS, PES or Section filters will
+ * receive filtered data via the Demux callback API (see 0). The function
+ * returns when all the data in the buffer has been consumed by the demux.
+ * Demux hardware typically cannot read TS from memory. If this is the
+ * case, memory-based filtering has to be implemented entirely in software.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @buf function parameter contains a pointer to the TS data in
+ * kernel-space memory.
+ * The @count function parameter contains the length of the TS data.
+ * It returns
+ * 0 on success;
+ * -ERESTARTSYS, if mutex lock was interrupted;
+ * -EINTR, if a signal handling is pending;
+ * -ENODEV, if demux was removed;
+ * -EINVAL, on bad parameter.
+ *
+ * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS
+ * packets carrying a certain PID. The TS feed normally corresponds to a
+ * hardware PID filter on the demux chip.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * The @callback function parameter contains a pointer to the callback
+ * function for passing received TS packet.
+ * It returns
+ * 0 on success;
+ * -ERESTARTSYS, if mutex lock was interrupted;
+ * -EBUSY, if no more TS feeds are available;
+ * -EINVAL, on bad parameter.
+ *
+ * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed.
+ * Any filtering in progress on the TS feed should be stopped before
+ * calling this function.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -EINVAL on bad parameter.
+ *
+ * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource
+ * for filtering and receiving sections. On platforms with hardware
+ * support for section filtering, a section feed is directly mapped to
+ * the demux HW. On other platforms, TS packets are first PID filtered in
+ * hardware and a hardware section filter then emulated in software. The
+ * caller obtains an API pointer of type dmx_section_feed_t as an out
+ * parameter. Using this API the caller can set filtering parameters and
+ * start receiving sections.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the section feed API
+ * and instance data.
+ * The @callback function parameter contains a pointer to the callback
+ * function for passing received sections.
+ * It returns
+ * 0 on success;
+ * -EBUSY, if no more section feeds are available;
+ * -EINVAL, on bad parameter.
+ *
+ * @release_section_feed: Releases the resources allocated with
+ * @allocate_section_feed, including allocated filters. Any filtering in
+ * progress on the section feed should be stopped before calling this
+ * function.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -EINVAL, on bad parameter.
+ *
+ * @add_frontend: Registers a connectivity between a demux and a front-end,
+ * i.e., indicates that the demux can be connected via a call to
+ * @connect_frontend to use the given front-end as a TS source. The
+ * client of this function has to allocate dynamic or static memory for
+ * the frontend structure and initialize its fields before calling this
+ * function. This function is normally called during the driver
+ * initialization. The caller must not free the memory of the frontend
+ * struct before successfully calling @remove_frontend.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @frontend function parameter contains a pointer to the front-end
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -EINVAL, on bad parameter.
+ *
+ * @remove_frontend: Indicates that the given front-end, registered by a call
+ * to @add_frontend, can no longer be connected as a TS source by this
+ * demux. The function should be called when a front-end driver or a demux
+ * driver is removed from the system. If the front-end is in use, the
+ * function fails with the return value of -EBUSY. After successfully
+ * calling this function, the caller can free the memory of the frontend
+ * struct if it was dynamically allocated before the @add_frontend
+ * operation.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @frontend function parameter contains a pointer to the front-end
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -ENODEV, if the front-end was not found;
+ * -EINVAL, on bad parameter.
+ *
+ * @get_frontends: Provides the APIs of the front-ends that have been
+ * registered for this demux. Any of the front-ends obtained with this
+ * call can be used as a parameter for @connect_frontend. The include
+ * file demux.h contains the macro DMX_FE_ENTRY() for converting an
+ * element of the generic type struct &list_head * to the type
+ * struct &dmx_frontend *. The caller must not free the memory of any of
+ * the elements obtained via this function call.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns a struct list_head pointer to the list of front-end
+ * interfaces, or NULL in the case of an empty list.
+ *
+ * @connect_frontend: Connects the TS output of the front-end to the input of
+ * the demux. A demux can only be connected to a front-end registered to
+ * the demux with the function @add_frontend. It may or may not be
+ * possible to connect multiple demuxes to the same front-end, depending
+ * on the capabilities of the HW platform. When not used, the front-end
+ * should be released by calling @disconnect_frontend.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @frontend function parameter contains a pointer to the front-end
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -EINVAL, on bad parameter.
+ *
+ * @disconnect_frontend: Disconnects the demux and a front-end previously
+ * connected by a @connect_frontend call.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns
+ * 0 on success;
+ * -EINVAL on bad parameter.
+ *
+ * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0,
+ * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @pids function parameter contains an array with five u16 elements
+ * where the PIDs will be stored.
+ * It returns
+ * 0 on success;
+ * -EINVAL on bad parameter.
+ */
struct dmx_demux {
- u32 capabilities; /* Bitfield of capability flags */
- struct dmx_frontend* frontend; /* Front-end connected to the demux */
- void* priv; /* Pointer to private data of the API client */
- int (*open) (struct dmx_demux* demux);
- int (*close) (struct dmx_demux* demux);
- int (*write) (struct dmx_demux* demux, const char __user *buf, size_t count);
- int (*allocate_ts_feed) (struct dmx_demux* demux,
- struct dmx_ts_feed** feed,
- dmx_ts_cb callback);
- int (*release_ts_feed) (struct dmx_demux* demux,
- struct dmx_ts_feed* feed);
- int (*allocate_section_feed) (struct dmx_demux* demux,
- struct dmx_section_feed** feed,
- dmx_section_cb callback);
- int (*release_section_feed) (struct dmx_demux* demux,
- struct dmx_section_feed* feed);
- int (*add_frontend) (struct dmx_demux* demux,
- struct dmx_frontend* frontend);
- int (*remove_frontend) (struct dmx_demux* demux,
- struct dmx_frontend* frontend);
- struct list_head* (*get_frontends) (struct dmx_demux* demux);
- int (*connect_frontend) (struct dmx_demux* demux,
- struct dmx_frontend* frontend);
- int (*disconnect_frontend) (struct dmx_demux* demux);
-
- int (*get_pes_pids) (struct dmx_demux* demux, u16 *pids);
-
- int (*get_caps) (struct dmx_demux* demux, struct dmx_caps *caps);
-
- int (*set_source) (struct dmx_demux* demux, const dmx_source_t *src);
-
- int (*get_stc) (struct dmx_demux* demux, unsigned int num,
- u64 *stc, unsigned int *base);
+ enum dmx_demux_caps capabilities;
+ struct dmx_frontend *frontend;
+ void *priv;
+ int (*open)(struct dmx_demux *demux);
+ int (*close)(struct dmx_demux *demux);
+ int (*write)(struct dmx_demux *demux, const char __user *buf,
+ size_t count);
+ int (*allocate_ts_feed)(struct dmx_demux *demux,
+ struct dmx_ts_feed **feed,
+ dmx_ts_cb callback);
+ int (*release_ts_feed)(struct dmx_demux *demux,
+ struct dmx_ts_feed *feed);
+ int (*allocate_section_feed)(struct dmx_demux *demux,
+ struct dmx_section_feed **feed,
+ dmx_section_cb callback);
+ int (*release_section_feed)(struct dmx_demux *demux,
+ struct dmx_section_feed *feed);
+ int (*add_frontend)(struct dmx_demux *demux,
+ struct dmx_frontend *frontend);
+ int (*remove_frontend)(struct dmx_demux *demux,
+ struct dmx_frontend *frontend);
+ struct list_head *(*get_frontends)(struct dmx_demux *demux);
+ int (*connect_frontend)(struct dmx_demux *demux,
+ struct dmx_frontend *frontend);
+ int (*disconnect_frontend)(struct dmx_demux *demux);
+
+ int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
+
+ /* private: Not used upstream and never documented */
+#if 0
+ int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps);
+ int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src);
+#endif
+ /*
+ * private: Only used at av7110, to read some data from firmware.
+ * As this was never documented, we have no clue about what's
+ * there, and its use in other drivers isn't encouraged.
+ */
+ int (*get_stc)(struct dmx_demux *demux, unsigned int num,
+ u64 *stc, unsigned int *base);
};
#endif /* #ifndef __DEMUX_H */
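
For orientation, this is roughly how a demux client exercises the ops documented above after this change; note that the TS callback no longer receives an enum dmx_success argument, matching the dmxdev and dvb_net updates below. The my_* names are placeholders, not code from this patch:

static int my_ts_cb(const u8 *buffer1, size_t buffer1_len,
		    const u8 *buffer2, size_t buffer2_len,
		    struct dmx_ts_feed *feed)
{
	/* consume the filtered TS data here */
	return 0;
}

static int my_attach(struct dmx_demux *demux)
{
	struct dmx_ts_feed *feed;
	int ret;

	ret = demux->open(demux);	/* increments the usage count */
	if (ret)
		return ret;

	ret = demux->allocate_ts_feed(demux, &feed, my_ts_cb);
	if (ret)
		demux->close(demux);	/* decrements the usage count */
	return ret;
}
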
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index d0e3f9d85f34..ea9abde902e9 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -352,8 +352,7 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_section_filter *filter,
- enum dmx_success success)
+ struct dmx_section_filter *filter)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
int ret;
@@ -386,8 +385,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_ts_feed *feed,
- enum dmx_success success)
+ struct dmx_ts_feed *feed)
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
@@ -1023,6 +1021,9 @@ static int dvb_demux_do_ioctl(struct file *file,
dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
break;
+#if 0
+ /* Not used upstream and never documented */
+
case DMX_GET_CAPS:
if (!dmxdev->demux->get_caps) {
ret = -EINVAL;
@@ -1038,6 +1039,7 @@ static int dvb_demux_do_ioctl(struct file *file,
}
ret = dmxdev->demux->set_source(dmxdev->demux, parg);
break;
+#endif
case DMX_GET_STC:
if (!dmxdev->demux->get_stc) {
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index c117fb3b4aff..0a46580b5376 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -257,6 +257,7 @@
#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
#define USB_PID_TERRATEC_H7 0x10b4
#define USB_PID_TERRATEC_H7_2 0x10a3
+#define USB_PID_TERRATEC_H7_3 0x10a5
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_NOXON_DAB_STICK 0x00b3
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h
index aba3b4fbd704..1e4bbbd34d91 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.h
+++ b/drivers/media/dvb-core/dvb_ca_en50221.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _DVB_CA_EN50221_H_
@@ -37,50 +33,53 @@
#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0
#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1
-
-
-/* Structure describing a CA interface */
+/**
+ * struct dvb_ca_en50221 - Structure describing a CA interface
+ *
+ * @owner: the module owning this structure
+ * @read_attribute_mem: function for reading attribute memory on the CAM
+ * @write_attribute_mem: function for writing attribute memory on the CAM
+ * @read_cam_control: function for reading the control interface on the CAM
+ * @write_cam_control: function for writing to the control interface on the CAM
+ * @slot_reset: function to reset the CAM slot
+ * @slot_shutdown: function to shutdown a CAM slot
+ * @slot_ts_enable: function to enable the Transport Stream on a CAM slot
+ * @poll_slot_status: function to poll slot status. Only necessary if
+ * DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set.
+ * @data: private data, used by caller.
+ * @private: Opaque data used by the dvb_ca core. Do not modify!
+ *
+ * NOTE: the read_*, write_* and poll_slot_status functions will be
+ * called for different slots concurrently and need to use locks where
+ * and if appropriate. There will be no concurrent access to one slot.
+ */
struct dvb_ca_en50221 {
+ struct module *owner;
- /* the module owning this structure */
- struct module* owner;
-
- /* NOTE: the read_*, write_* and poll_slot_status functions will be
- * called for different slots concurrently and need to use locks where
- * and if appropriate. There will be no concurrent access to one slot.
- */
+ int (*read_attribute_mem)(struct dvb_ca_en50221 *ca,
+ int slot, int address);
+ int (*write_attribute_mem)(struct dvb_ca_en50221 *ca,
+ int slot, int address, u8 value);
- /* functions for accessing attribute memory on the CAM */
- int (*read_attribute_mem)(struct dvb_ca_en50221* ca, int slot, int address);
- int (*write_attribute_mem)(struct dvb_ca_en50221* ca, int slot, int address, u8 value);
+ int (*read_cam_control)(struct dvb_ca_en50221 *ca,
+ int slot, u8 address);
+ int (*write_cam_control)(struct dvb_ca_en50221 *ca,
+ int slot, u8 address, u8 value);
- /* functions for accessing the control interface on the CAM */
- int (*read_cam_control)(struct dvb_ca_en50221* ca, int slot, u8 address);
- int (*write_cam_control)(struct dvb_ca_en50221* ca, int slot, u8 address, u8 value);
+ int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
+ int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
+ int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
- /* Functions for controlling slots */
- int (*slot_reset)(struct dvb_ca_en50221* ca, int slot);
- int (*slot_shutdown)(struct dvb_ca_en50221* ca, int slot);
- int (*slot_ts_enable)(struct dvb_ca_en50221* ca, int slot);
+ int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open);
- /*
- * Poll slot status.
- * Only necessary if DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set
- */
- int (*poll_slot_status)(struct dvb_ca_en50221* ca, int slot, int open);
+ void *data;
- /* private data, used by caller */
- void* data;
-
- /* Opaque data used by the dvb_ca core. Do not modify! */
- void* private;
+ void *private;
};
-
-
-
-/* ******************************************************************************** */
-/* Functions for reporting IRQ events */
+/*
+ * Functions for reporting IRQ events
+ */
/**
* dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
@@ -89,7 +88,8 @@ struct dvb_ca_en50221 {
* @slot: Slot concerned.
* @change_type: One of the DVB_CA_CAMCHANGE_* values
*/
-void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221* pubca, int slot, int change_type);
+void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot,
+ int change_type);
/**
* dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
@@ -97,7 +97,7 @@ void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221* pubca, int slot, int ch
* @pubca: CA instance.
* @slot: Slot concerned.
*/
-void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221* pubca, int slot);
+void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot);
/**
* dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred.
@@ -105,12 +105,11 @@ void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221* pubca, int slot);
* @ca: CA instance.
* @slot: Slot concerned.
*/
-void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221* ca, int slot);
-
+void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot);
-
-/* ******************************************************************************** */
-/* Initialisation/shutdown functions */
+/*
+ * Initialisation/shutdown functions
+ */
/**
* dvb_ca_en50221_init - Initialise a new DVB CA device.
@@ -122,15 +121,15 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221* ca, int slot);
*
* @return 0 on success, nonzero on failure
*/
-extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, struct dvb_ca_en50221* ca, int flags, int slot_count);
+extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
+ struct dvb_ca_en50221 *ca, int flags,
+ int slot_count);
/**
* dvb_ca_en50221_release - Release a DVB CA device.
*
* @ca: The associated dvb_ca instance.
*/
-extern void dvb_ca_en50221_release(struct dvb_ca_en50221* ca);
-
-
+extern void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca);
#endif
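
A bridge driver fills in the structure documented above roughly as follows; this is a sketch only, the my_* callbacks are placeholders, and poll_slot_status may be omitted when the CAMCHANGE IRQ flag mentioned in the kernel-doc is set:

static struct dvb_ca_en50221 my_ca = {
	.owner               = THIS_MODULE,
	.read_attribute_mem  = my_read_attribute_mem,
	.write_attribute_mem = my_write_attribute_mem,
	.read_cam_control    = my_read_cam_control,
	.write_cam_control   = my_write_cam_control,
	.slot_reset          = my_slot_reset,
	.slot_shutdown       = my_slot_shutdown,
	.slot_ts_enable      = my_slot_ts_enable,
	.poll_slot_status    = my_poll_slot_status,
	.data                = NULL,	/* private data of the caller */
};

The structure is then passed to dvb_ca_en50221_init() together with the adapter, the flags and the slot count, per the prototype above.
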
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 6c7ff0cdcd32..0cc5e935166c 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -130,7 +130,7 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
feed->peslen += count;
- return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, DMX_OK);
+ return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
}
static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -152,7 +152,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
return 0;
return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
- NULL, 0, &f->filter, DMX_OK);
+ NULL, 0, &f->filter);
}
static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
@@ -367,8 +367,7 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
if (feed->ts_type & TS_PAYLOAD_ONLY)
dvb_dmx_swfilter_payload(feed, buf);
else
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
- DMX_OK);
+ feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
}
if (feed->ts_type & TS_DECODER)
if (feed->demux->write_to_decoder)
@@ -469,7 +468,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
if (feed->pid == pid)
dvb_dmx_swfilter_packet_type(feed, buf);
else if (feed->pid == 0x2000)
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK);
+ feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
}
}
@@ -588,7 +587,7 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
spin_lock_irqsave(&demux->lock, flags);
- demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
+ demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
spin_unlock_irqrestore(&demux->lock, flags);
}
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index b81e026edab3..ce4332e80a91 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -761,7 +761,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_ts_feed *feed, enum dmx_success success)
+ struct dmx_ts_feed *feed)
{
struct net_device *dev = feed->priv;
@@ -870,8 +870,7 @@ static void dvb_net_sec(struct net_device *dev,
static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_section_filter *filter,
- enum dmx_success success)
+ struct dmx_section_filter *filter)
{
struct net_device *dev = filter->priv;
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index c61a4f03a66f..1069a776bbdb 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -184,10 +184,6 @@ int dvb_unregister_adapter(struct dvb_adapter *adap);
* @pdvbdev: pointer to the place where the new struct dvb_device will be
* stored
* @template: Template used to create &pdvbdev;
- * @device: pointer to struct device that corresponds to the device driver
- * @adapter_nums: Array with a list of the numbers for @dvb_register_adapter;
- * to select among them. Typically, initialized with:
- * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums)
* @priv: private data
* @type: type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
* DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 34b9441840da..445a15c2714f 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2950,10 +2950,9 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
{
struct drxd_state *state = NULL;
- state = kmalloc(sizeof(struct drxd_state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
- memset(state, 0, sizeof(*state));
state->ops = drxd_ops;
state->dev = dev;
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index d5b994f17612..dcd8d94c1037 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -29,6 +29,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <linux/platform_device.h>
@@ -107,7 +108,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
/* intermediate buffers with raw data from the USB device */
struct rtl2832_sdr_frame_buf {
- struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -304,13 +306,13 @@ static void rtl2832_sdr_urb_complete(struct urb *urb)
}
/* fill framebuffer */
- ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer,
urb->actual_length);
- vb2_set_plane_payload(&fbuf->vb, 0, len);
- v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
- fbuf->vb.v4l2_buf.sequence = dev->sequence++;
- vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
+ v4l2_get_timestamp(&fbuf->vb.timestamp);
+ fbuf->vb.sequence = dev->sequence++;
+ vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
skip:
usb_submit_urb(urb, GFP_ATOMIC);
@@ -464,7 +466,7 @@ static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_dev *dev)
buf = list_entry(dev->queued_bufs.next,
struct rtl2832_sdr_frame_buf, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}
@@ -488,7 +490,7 @@ static int rtl2832_sdr_querycap(struct file *file, void *fh,
/* Videobuf2 operations */
static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt, unsigned int *nbuffers,
+ const void *parg, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
@@ -518,14 +520,15 @@ static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct rtl2832_sdr_frame_buf *buf =
- container_of(vb, struct rtl2832_sdr_frame_buf, vb);
+ container_of(vbuf, struct rtl2832_sdr_frame_buf, vb);
unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (!dev->udev) {
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
return;
}
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index af5eaf2db2a0..38a20fe181ee 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -233,6 +233,15 @@ static int ml86v7667_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
+static int ml86v7667_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct ml86v7667_priv *priv = to_ml86v7667(sd);
+
+ *std = priv->std;
+
+ return 0;
+}
+
static int ml86v7667_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct ml86v7667_priv *priv = to_ml86v7667(sd);
@@ -282,6 +291,7 @@ static const struct v4l2_ctrl_ops ml86v7667_ctrl_ops = {
};
static struct v4l2_subdev_video_ops ml86v7667_subdev_video_ops = {
+ .g_std = ml86v7667_g_std,
.s_std = ml86v7667_s_std,
.querystd = ml86v7667_querystd,
.g_input_status = ml86v7667_g_input_status,
@@ -427,7 +437,6 @@ MODULE_DEVICE_TABLE(i2c, ml86v7667_id);
static struct i2c_driver ml86v7667_i2c_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ml86v7667_probe,
.remove = ml86v7667_remove,
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 53c5ea89f0b9..51b26010403c 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -167,7 +167,7 @@ static int s5c73m3_i2c_read(struct i2c_client *client, u16 addr, u16 *data)
*/
ret = i2c_transfer(client->adapter, msg, 2);
if (ret == 2) {
- *data = be16_to_cpup((u16 *)rbuf);
+ *data = be16_to_cpup((__be16 *)rbuf);
v4l2_dbg(4, s5c73m3_dbg, client,
"%s: addr: 0x%04x, data: 0x%04x\n",
__func__, addr, *data);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index fa4a5ebda6b2..72ef9f936e6c 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -31,6 +31,7 @@ static const struct of_device_id s5c73m3_spi_ids[] = {
{ .compatible = "samsung,s5c73m3" },
{ }
};
+MODULE_DEVICE_TABLE(of, s5c73m3_spi_ids);
enum spi_direction {
SPI_DIR_RX,
@@ -149,7 +150,6 @@ int s5c73m3_register_spi_driver(struct s5c73m3 *state)
spidrv->remove = s5c73m3_spi_remove;
spidrv->probe = s5c73m3_spi_probe;
spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
- spidrv->driver.owner = THIS_MODULE;
spidrv->driver.of_match_table = s5c73m3_spi_ids;
return spi_register_driver(spidrv);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 636ebd6fe5dc..fb39dfd55e75 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -3131,6 +3131,7 @@ static const struct of_device_id smiapp_of_table[] = {
{ .compatible = "nokia,smia" },
{ },
};
+MODULE_DEVICE_TABLE(of, smiapp_of_table);
static const struct i2c_device_id smiapp_id_table[] = {
{ SMIAPP_NAME, 0 },
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 522a865c5c60..3c5fb2509c47 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -10,6 +10,7 @@
#include <linux/videodev2.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/tvp5150.h>
#include <media/v4l2-ctrls.h>
@@ -1172,8 +1173,7 @@ static int tvp5150_probe(struct i2c_client *c,
sd->ctrl_handler = &core->hdl;
if (core->hdl.error) {
res = core->hdl.error;
- v4l2_ctrl_handler_free(&core->hdl);
- return res;
+ goto err;
}
v4l2_ctrl_handler_setup(&core->hdl);
@@ -1186,9 +1186,17 @@ static int tvp5150_probe(struct i2c_client *c,
core->rect.left = 0;
core->rect.width = TVP5150_H_MAX;
+ res = v4l2_async_register_subdev(sd);
+ if (res < 0)
+ goto err;
+
if (debug > 1)
tvp5150_log_status(sd);
return 0;
+
+err:
+ v4l2_ctrl_handler_free(&core->hdl);
+ return res;
}
static int tvp5150_remove(struct i2c_client *c)
@@ -1200,7 +1208,7 @@ static int tvp5150_remove(struct i2c_client *c)
"tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
c->addr << 1);
- v4l2_device_unregister_subdev(sd);
+ v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
return 0;
}
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 153a46469814..767fe55ba08e 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -235,8 +235,8 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
- DECLARE_BITMAP(active, entity->num_pads);
- DECLARE_BITMAP(has_no_links, entity->num_pads);
+ DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS);
+ DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS);
unsigned int i;
entity->stream_count++;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 3632958f2158..15a4ebc2844d 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3625,13 +3625,10 @@ static void
bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
unsigned int state)
{
- struct timeval ts;
-
if (NULL == wakeup)
return;
- v4l2_get_timestamp(&ts);
- wakeup->vb.ts = ts;
+ v4l2_get_timestamp(&wakeup->vb.ts);
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = state;
wake_up(&wakeup->vb.done);
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 1f88ccc174da..a01f0cc745cc 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_COBALT
tristate "Cisco Cobalt support"
- depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
depends on GPIOLIB || COMPILE_TEST
depends on SND
diff --git a/drivers/media/pci/cobalt/cobalt-cpld.c b/drivers/media/pci/cobalt/cobalt-cpld.c
index e83f5c9f7e7d..23c875fc173e 100644
--- a/drivers/media/pci/cobalt/cobalt-cpld.c
+++ b/drivers/media/pci/cobalt/cobalt-cpld.c
@@ -290,8 +290,8 @@ bool cobalt_cpld_set_freq(struct cobalt *cobalt, unsigned f_out)
0x01, 0xc7, 0xfc, 0x7f, 0x53, 0x62).
*/
- cobalt_dbg(1, "%u: %02x %02x %02x %02x %02x %02x\n", f_out,
- regs[0], regs[1], regs[2], regs[3], regs[4], regs[5]);
+ cobalt_dbg(1, "%u: %6ph\n", f_out, regs);
+
while (retries--) {
u8 read_regs[6];
@@ -330,9 +330,7 @@ bool cobalt_cpld_set_freq(struct cobalt *cobalt, unsigned f_out)
if (!memcmp(read_regs, regs, sizeof(read_regs)))
break;
- cobalt_dbg(1, "retry: %02x %02x %02x %02x %02x %02x\n",
- read_regs[0], read_regs[1], read_regs[2],
- read_regs[3], read_regs[4], read_regs[5]);
+ cobalt_dbg(1, "retry: %6ph\n", read_regs);
}
if (2 - retries)
cobalt_info("Needed %d retries\n", 2 - retries);
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index c206df930669..b2f08e4a68bf 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -35,6 +35,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
#include "m00233_video_measure_memmap_package.h"
@@ -206,11 +207,12 @@ struct sg_dma_desc_info {
#define COBALT_STREAM_FL_ADV_IRQ 1
struct cobalt_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
-static inline struct cobalt_buffer *to_cobalt_buffer(struct vb2_buffer *vb2)
+static inline
+struct cobalt_buffer *to_cobalt_buffer(struct vb2_v4l2_buffer *vb2)
{
return container_of(vb2, struct cobalt_buffer, vb);
}
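
The same embed-and-recover pattern repeats across the vb2 conversions in this series: the driver buffer embeds struct vb2_v4l2_buffer as its first member, the driver obtains it from the core's struct vb2_buffer via to_vb2_v4l2_buffer() and then container_of()s to its own type, while plane and completion helpers take the embedded vb2_buf. A sketch using the cobalt names, for illustration only:

	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cobalt_buffer *cb = to_cobalt_buffer(vbuf);

	vbuf->field = V4L2_FIELD_NONE;	/* v4l2 state lives in vbuf */
	vb2_buffer_done(&cb->vb.vb2_buf, VB2_BUF_STATE_DONE);	/* vb2 helpers take vb2_buf */
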
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index d1f5898d11ba..3de26d0714b5 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -134,11 +134,12 @@ done:
skip = true;
s->skip_first_frames--;
}
- v4l2_get_timestamp(&cb->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&cb->vb.timestamp);
/* TODO: the sequence number should be read from the FPGA so we
also know about dropped frames. */
- cb->vb.v4l2_buf.sequence = s->sequence++;
- vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
+ cb->vb.sequence = s->sequence++;
+ vb2_buffer_done(&cb->vb.vb2_buf,
+ (skip || s->unstable_frame) ?
VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
}
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 9756fd3e8af5..ff46e424262f 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -43,11 +43,11 @@ static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60;
/* vb2 DMA streaming ops */
-static int cobalt_queue_setup(struct vb2_queue *q,
- const struct v4l2_format *fmt,
+static int cobalt_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct cobalt_stream *s = q->drv_priv;
unsigned size = s->stride * s->height;
@@ -75,7 +75,7 @@ static int cobalt_buf_init(struct vb2_buffer *vb)
const size_t bytes =
COBALT_MAX_HEIGHT * max_pages_per_line * 0x20;
const size_t audio_bytes = ((1920 * 4) / PAGE_SIZE + 1) * 0x20;
- struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index];
+ struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
struct sg_table *sg_desc = vb2_dma_sg_plane_desc(vb, 0);
unsigned size;
int ret;
@@ -105,17 +105,18 @@ static int cobalt_buf_init(struct vb2_buffer *vb)
static void cobalt_buf_cleanup(struct vb2_buffer *vb)
{
struct cobalt_stream *s = vb->vb2_queue->drv_priv;
- struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index];
+ struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
descriptor_list_free(desc);
}
static int cobalt_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cobalt_stream *s = vb->vb2_queue->drv_priv;
vb2_set_plane_payload(vb, 0, s->stride * s->height);
- vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ vbuf->field = V4L2_FIELD_NONE;
return 0;
}
@@ -128,7 +129,7 @@ static void chain_all_buffers(struct cobalt_stream *s)
list_for_each(p, &s->bufs) {
cb = list_entry(p, struct cobalt_buffer, list);
- desc[i] = &s->dma_desc_info[cb->vb.v4l2_buf.index];
+ desc[i] = &s->dma_desc_info[cb->vb.vb2_buf.index];
if (i > 0)
descriptor_list_chain(desc[i-1], desc[i]);
i++;
@@ -137,10 +138,11 @@ static void chain_all_buffers(struct cobalt_stream *s)
static void cobalt_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *q = vb->vb2_queue;
struct cobalt_stream *s = q->drv_priv;
- struct cobalt_buffer *cb = to_cobalt_buffer(vb);
- struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index];
+ struct cobalt_buffer *cb = to_cobalt_buffer(vbuf);
+ struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
unsigned long flags;
/* Prepare new buffer */
@@ -284,7 +286,7 @@ static void cobalt_dma_start_streaming(struct cobalt_stream *s)
&vo->control);
}
cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
- omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.v4l2_buf.index]);
+ omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.vb2_buf.index]);
spin_unlock_irqrestore(&s->irqlock, flags);
}
@@ -381,7 +383,7 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
spin_lock_irqsave(&s->irqlock, flags);
list_for_each(p, &s->bufs) {
cb = list_entry(p, struct cobalt_buffer, list);
- desc = &s->dma_desc_info[cb->vb.v4l2_buf.index];
+ desc = &s->dma_desc_info[cb->vb.vb2_buf.index];
/* Stop DMA after this descriptor chain */
descriptor_list_end_of_chain(desc);
}
@@ -416,7 +418,7 @@ static void cobalt_stop_streaming(struct vb2_queue *q)
list_for_each_safe(p, safe, &s->bufs) {
cb = list_entry(p, struct cobalt_buffer, list);
list_del(&cb->list);
- vb2_buffer_done(&cb->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&cb->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&s->irqlock, flags);
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index eabf00c6351b..1f8aa9a749a1 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -202,7 +202,7 @@ static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
}
if (dispatch) {
- vb_buf->vb.ts = ktime_to_timeval(ktime_get());
+ v4l2_get_timestamp(&vb_buf->vb.ts);
list_del(&vb_buf->vb.queue);
vb_buf->vb.state = VIDEOBUF_DONE;
wake_up(&vb_buf->vb.done);
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 63c0ee5d0bf5..88a3afb66d10 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1138,7 +1138,7 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -1155,17 +1155,19 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf =
- container_of(vb, struct cx23885_buffer, vb);
+ container_of(vbuf, struct cx23885_buffer, vb);
return cx23885_buf_prepare(buf, &dev->ts1);
}
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
- struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
cx23885_free_buffer(dev, buf);
@@ -1173,8 +1175,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
- struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
cx23885_buf_queue(&dev->ts1, buf);
@@ -1201,7 +1204,7 @@ static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
struct cx23885_buffer, queue);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
spin_unlock_irqrestore(&dev->slock, flags);
return ret;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 7aee76af7a85..bc1c9602f435 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -427,12 +427,13 @@ static void cx23885_wakeup(struct cx23885_tsport *port,
buf = list_entry(q->active.next,
struct cx23885_buffer, queue);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.sequence = q->count++;
- dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.sequence = q->count++;
+ dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
+ buf->vb.vb2_buf.index,
count, q->count);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
@@ -1453,12 +1454,12 @@ int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
struct cx23885_dev *dev = port->dev;
int size = port->ts_packet_size * port->ts_packet_count;
- struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
dprintk(1, "%s: %p\n", __func__, buf);
- if (vb2_plane_size(&buf->vb, 0) < size)
+ if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
return -EINVAL;
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
cx23885_risc_databuffer(dev->pci, &buf->risc,
sgt->sgl,
@@ -1503,7 +1504,7 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
if (list_empty(&cx88q->active)) {
list_add_tail(&buf->queue, &cx88q->active);
dprintk(1, "[%p/%d] %s - first active\n",
- buf, buf->vb.v4l2_buf.index, __func__);
+ buf, buf->vb.vb2_buf.index, __func__);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
@@ -1511,7 +1512,7 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
list_add_tail(&buf->queue, &cx88q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(1, "[%p/%d] %s - append to active\n",
- buf, buf->vb.v4l2_buf.index, __func__);
+ buf, buf->vb.vb2_buf.index, __func__);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1530,9 +1531,10 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
buf = list_entry(q->active.next, struct cx23885_buffer,
queue);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
- buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
+ buf, buf->vb.vb2_buf.index, reason,
+ (unsigned long)buf->risc.dma);
}
spin_unlock_irqrestore(&port->slock, flags);
}
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 6e8c24cdb2cd..c4307ad8594c 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -92,7 +92,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -110,18 +110,20 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf =
- container_of(vb, struct cx23885_buffer, vb);
+ container_of(vbuf, struct cx23885_buffer, vb);
return cx23885_buf_prepare(buf, port);
}
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
struct cx23885_dev *dev = port->dev;
- struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
cx23885_free_buffer(dev, buf);
@@ -129,8 +131,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
- struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
cx23885_buf_queue(port, buf);
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index d362d3838c84..cf3cb1324c55 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -121,7 +121,7 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -138,8 +138,9 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
- struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
unsigned lines = VBI_PAL_LINE_COUNT;
@@ -161,7 +162,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_buffer *buf = container_of(vb,
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
@@ -190,8 +192,10 @@ static void buffer_finish(struct vb2_buffer *vb)
*/
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
- struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb);
+ struct cx23885_buffer *buf = container_of(vbuf,
+ struct cx23885_buffer, vb);
struct cx23885_buffer *prev;
struct cx23885_dmaqueue *q = &dev->vbiq;
unsigned long flags;
@@ -206,7 +210,7 @@ static void buffer_queue(struct vb2_buffer *vb)
list_add_tail(&buf->queue, &q->active);
spin_unlock_irqrestore(&dev->slock, flags);
dprintk(2, "[%p/%d] vbi_queue - first active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -217,7 +221,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(2, "[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
}
@@ -245,7 +249,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
struct cx23885_buffer, queue);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index ec76470d12a4..71a80e2b842c 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -104,12 +104,12 @@ void cx23885_video_wakeup(struct cx23885_dev *dev,
buf = list_entry(q->active.next,
struct cx23885_buffer, queue);
- buf->vb.v4l2_buf.sequence = q->count++;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
- count, q->count);
+ buf->vb.sequence = q->count++;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
+ buf->vb.vb2_buf.index, count, q->count);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
@@ -315,7 +315,7 @@ static int cx23885_start_video_dma(struct cx23885_dev *dev,
return 0;
}
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -329,9 +329,10 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf =
- container_of(vb, struct cx23885_buffer, vb);
+ container_of(vbuf, struct cx23885_buffer, vb);
u32 line0_offset, line1_offset;
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int field_tff;
@@ -401,7 +402,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
BUG();
}
dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.v4l2_buf.index,
+ buf, buf->vb.vb2_buf.index,
dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
return 0;
@@ -409,7 +410,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_buffer *buf = container_of(vb,
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
@@ -438,8 +440,9 @@ static void buffer_finish(struct vb2_buffer *vb)
*/
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
- struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer *buf = container_of(vbuf,
struct cx23885_buffer, vb);
struct cx23885_buffer *prev;
struct cx23885_dmaqueue *q = &dev->vidq;
@@ -455,7 +458,7 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->queue, &q->active);
dprintk(2, "[%p/%d] buffer_queue - first active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(q->active.prev, struct cx23885_buffer,
@@ -463,7 +466,7 @@ static void buffer_queue(struct vb2_buffer *vb)
list_add_tail(&buf->queue, &q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(2, "[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -492,7 +495,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
struct cx23885_buffer, queue);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 027ead438194..c5ba0833f47a 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -170,7 +170,7 @@ struct cx23885_riscmem {
/* buffer for one video frame */
struct cx23885_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head queue;
/* cx23885 specific */
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 24f964bcc53a..b602eba2b601 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -102,7 +102,7 @@ struct cx25821_audio_dev {
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static bool enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. default enabled.");
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 7bc495e4ece2..26e3e296d615 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -130,10 +130,10 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
buf = list_entry(dmaq->active.next,
struct cx25821_buffer, queue);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.sequence = dmaq->count++;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.sequence = dmaq->count++;
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
spin_unlock(&dev->slock);
handled++;
@@ -141,10 +141,11 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
return handled;
}
-static int cx25821_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int cx25821_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct cx25821_channel *chan = q->drv_priv;
unsigned size = (chan->fmt->depth * chan->width * chan->height) >> 3;
@@ -159,10 +160,11 @@ static int cx25821_queue_setup(struct vb2_queue *q, const struct v4l2_format *fm
static int cx25821_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_buffer *buf =
- container_of(vb, struct cx25821_buffer, vb);
+ container_of(vbuf, struct cx25821_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
u32 line0_offset;
int bpl_local = LINE_SIZE_D1;
@@ -176,7 +178,7 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
if (vb2_plane_size(vb, 0) < chan->height * buf->bpl)
return -EINVAL;
vb2_set_plane_payload(vb, 0, chan->height * buf->bpl);
- buf->vb.v4l2_buf.field = chan->field;
+ buf->vb.field = chan->field;
if (chan->pixel_formats == PIXEL_FRMT_411) {
bpl_local = buf->bpl;
@@ -231,7 +233,7 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
}
dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.v4l2_buf.index, chan->width, chan->height,
+ buf, buf->vb.vb2_buf.index, chan->width, chan->height,
chan->fmt->depth, chan->fmt->name,
(unsigned long)buf->risc.dma);
@@ -240,8 +242,9 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
static void cx25821_buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx25821_buffer *buf =
- container_of(vb, struct cx25821_buffer, vb);
+ container_of(vbuf, struct cx25821_buffer, vb);
struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
@@ -250,8 +253,9 @@ static void cx25821_buffer_finish(struct vb2_buffer *vb)
static void cx25821_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx25821_buffer *buf =
- container_of(vb, struct cx25821_buffer, vb);
+ container_of(vbuf, struct cx25821_buffer, vb);
struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
struct cx25821_dev *dev = chan->dev;
struct cx25821_buffer *prev;
@@ -300,7 +304,7 @@ static void cx25821_stop_streaming(struct vb2_queue *q)
struct cx25821_buffer, queue);
list_del(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index d81a08a2df4f..a513b68be0fa 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -34,6 +34,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
#include "cx25821-reg.h"
@@ -127,7 +128,7 @@ struct cx25821_riscmem {
/* buffer for one video frame */
struct cx25821_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head queue;
/* cx25821 specific */
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 7f8dc60028d5..57ddf8a34178 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -101,7 +101,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx88x soundcard. default enabled.");
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 24216efa56e7..8b889135be8a 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -637,7 +637,7 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -653,16 +653,18 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
}
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
@@ -672,8 +674,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
cx8802_buf_queue(dev, buf);
}
@@ -721,7 +724,7 @@ fail:
struct cx88_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
spin_unlock_irqrestore(&dev->slock, flags);
return err;
@@ -749,7 +752,7 @@ static void stop_streaming(struct vb2_queue *q)
struct cx88_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index aab7cf4c9825..9a43c7826b60 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -518,11 +518,11 @@ void cx88_wakeup(struct cx88_core *core,
buf = list_entry(q->active.next,
struct cx88_buffer, list);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.field = core->field;
- buf->vb.v4l2_buf.sequence = q->count++;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.field = core->field;
+ buf->vb.sequence = q->count++;
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
void cx88_shutdown(struct cx88_core *core)
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 9dfa5ee32a8f..f04835073844 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -82,7 +82,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -99,16 +99,18 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
}
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
@@ -118,8 +120,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
cx8802_buf_queue(dev, buf);
}
@@ -149,7 +152,7 @@ static void stop_streaming(struct vb2_queue *q)
struct cx88_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 34f505744477..9961b2232b97 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -214,7 +214,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
buf = list_entry(q->active.next, struct cx88_buffer, list);
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
cx8802_start_dma(dev, q, buf);
return 0;
}
@@ -225,13 +225,13 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
struct cx88_buffer *buf)
{
int size = dev->ts_packet_size * dev->ts_packet_count;
- struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
struct cx88_riscmem *risc = &buf->risc;
int rc;
- if (vb2_plane_size(&buf->vb, 0) < size)
+ if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
return -EINVAL;
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
dev->ts_packet_size, dev->ts_packet_count, 0);
@@ -259,7 +259,7 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
dprintk( 1, "queue is empty - first active\n" );
list_add_tail(&buf->list, &cx88q->active);
dprintk(1,"[%p/%d] %s - first active\n",
- buf, buf->vb.v4l2_buf.index, __func__);
+ buf, buf->vb.vb2_buf.index, __func__);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -268,7 +268,7 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
list_add_tail(&buf->list, &cx88q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk( 1, "[%p/%d] %s - append to active\n",
- buf, buf->vb.v4l2_buf.index, __func__);
+ buf, buf->vb.vb2_buf.index, __func__);
}
}
@@ -284,7 +284,7 @@ static void do_cancel_buffers(struct cx8802_dev *dev)
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock,flags);
}
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 7510e80eb2ff..007a5eee8e5e 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -100,14 +100,14 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
buf = list_entry(q->active.next, struct cx88_buffer, list);
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
cx8800_start_vbi_dma(dev, q, buf);
return 0;
}
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -125,8 +125,9 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
unsigned int lines;
unsigned int size;
@@ -149,8 +150,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
@@ -160,8 +162,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_buffer *prev;
struct cx88_dmaqueue *q = &dev->vbiq;
@@ -174,7 +177,7 @@ static void buffer_queue(struct vb2_buffer *vb)
list_add_tail(&buf->list, &q->active);
cx8800_start_vbi_dma(dev, q, buf);
dprintk(2,"[%p/%d] vbi_queue - first active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -182,7 +185,7 @@ static void buffer_queue(struct vb2_buffer *vb)
list_add_tail(&buf->list, &q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(2,"[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
}
@@ -213,7 +216,7 @@ static void stop_streaming(struct vb2_queue *q)
struct cx88_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 400e5caefd58..f3b12dbbe9a1 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -420,7 +420,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
if (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, list);
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
start_video_dma(dev, q, buf);
}
return 0;
@@ -429,7 +429,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
/* ------------------------------------------------------------------ */
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -444,9 +444,10 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_core *core = dev->core;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
buf->bpl = core->width * dev->fmt->depth >> 3;
@@ -489,7 +490,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
break;
}
dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.v4l2_buf.index,
+ buf, buf->vb.vb2_buf.index,
core->width, core->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
return 0;
@@ -497,8 +498,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
@@ -508,8 +510,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
- struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_buffer *prev;
struct cx88_core *core = dev->core;
struct cx88_dmaqueue *q = &dev->vidq;
@@ -522,7 +525,7 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->list, &q->active);
dprintk(2,"[%p/%d] buffer_queue - first active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
@@ -530,7 +533,7 @@ static void buffer_queue(struct vb2_buffer *vb)
list_add_tail(&buf->list, &q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(2, "[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
}
@@ -560,7 +563,7 @@ static void stop_streaming(struct vb2_queue *q)
struct cx88_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 785fe2e0d702..2996eb3ea1fc 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -321,7 +321,7 @@ struct cx88_riscmem {
/* buffer for one video frame */
struct cx88_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
/* cx88 specific */
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 8df634518927..d84abde5ea29 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -131,11 +131,12 @@ static int wait_i2c_reg(void __iomem *addr)
}
static int
-dt3155_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+dt3155_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct dt3155_priv *pd = vb2_get_drv_priv(vq);
unsigned size = pd->width * pd->height;
@@ -160,7 +161,7 @@ static int dt3155_buf_prepare(struct vb2_buffer *vb)
static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
- struct vb2_buffer *vb = pd->curr_buf;
+ struct vb2_buffer *vb = &pd->curr_buf->vb2_buf;
dma_addr_t dma_addr;
pd->sequence = 0;
@@ -208,7 +209,7 @@ static void dt3155_stop_streaming(struct vb2_queue *q)
spin_lock_irq(&pd->lock);
if (pd->curr_buf) {
- vb2_buffer_done(pd->curr_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&pd->curr_buf->vb2_buf, VB2_BUF_STATE_ERROR);
pd->curr_buf = NULL;
}
@@ -222,6 +223,7 @@ static void dt3155_stop_streaming(struct vb2_queue *q)
static void dt3155_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue);
/* pd->vidq.streaming = 1 when dt3155_buf_queue() is invoked */
@@ -229,7 +231,7 @@ static void dt3155_buf_queue(struct vb2_buffer *vb)
if (pd->curr_buf)
list_add_tail(&vb->done_entry, &pd->dmaq);
else
- pd->curr_buf = vb;
+ pd->curr_buf = vbuf;
spin_unlock_irq(&pd->lock);
}
@@ -269,14 +271,14 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
spin_lock(&ipd->lock);
if (ipd->curr_buf && !list_empty(&ipd->dmaq)) {
- v4l2_get_timestamp(&ipd->curr_buf->v4l2_buf.timestamp);
- ipd->curr_buf->v4l2_buf.sequence = ipd->sequence++;
- ipd->curr_buf->v4l2_buf.field = V4L2_FIELD_NONE;
- vb2_buffer_done(ipd->curr_buf, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&ipd->curr_buf->timestamp);
+ ipd->curr_buf->sequence = ipd->sequence++;
+ ipd->curr_buf->field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&ipd->curr_buf->vb2_buf, VB2_BUF_STATE_DONE);
ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
list_del(&ivb->done_entry);
- ipd->curr_buf = ivb;
+ ipd->curr_buf = to_vb2_v4l2_buffer(ivb);
dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0);
iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
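
The queue_setup() signature change repeated across these files (a const void *parg replacing const struct v4l2_format *fmt) follows the shape below; this is a hedged sketch, with foo_queue_setup and FOO_MAX_SIZE as hypothetical placeholders, and the cast mirroring the dt3155_queue_setup hunk above:

	#define FOO_MAX_SIZE (720 * 576 * 2)	/* placeholder default buffer size */

	static int foo_queue_setup(struct vb2_queue *vq, const void *parg,
				   unsigned int *nbuffers, unsigned int *nplanes,
				   unsigned int sizes[], void *alloc_ctxs[])
	{
		/* parg may still carry a v4l2_format, or be NULL, at this point */
		const struct v4l2_format *fmt = parg;

		*nplanes = 1;
		sizes[0] = fmt ? fmt->fmt.pix.sizeimage : FOO_MAX_SIZE;
		return 0;
	}
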
diff --git a/drivers/media/pci/dt3155/dt3155.h b/drivers/media/pci/dt3155/dt3155.h
index 4e1f4d598d57..b3531e0bc733 100644
--- a/drivers/media/pci/dt3155/dt3155.h
+++ b/drivers/media/pci/dt3155/dt3155.h
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
#define DT3155_NAME "dt3155"
#define DT3155_VER_MAJ 2
@@ -181,7 +182,7 @@ struct dt3155_priv {
struct pci_dev *pdev;
struct vb2_queue vidq;
struct vb2_alloc_ctx *alloc_ctx;
- struct vb2_buffer *curr_buf;
+ struct vb2_v4l2_buffer *curr_buf;
struct mutex mux;
struct list_head dmaq;
spinlock_t lock;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 41fa21534edf..8a86b61a896d 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -41,6 +41,7 @@
#include "ivtv-alsa-pcm.h"
int ivtv_alsa_debug;
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
#define IVTV_DEBUG_ALSA_INFO(fmt, arg...) \
do { \
@@ -54,6 +55,10 @@ MODULE_PARM_DESC(debug,
"\t\t\t 1/0x0001: warning\n"
"\t\t\t 2/0x0002: info\n");
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index,
+ "Index value for IVTV ALSA capture interface(s).\n");
+
MODULE_AUTHOR("Andy Walls");
MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface");
MODULE_SUPPORTED_DEVICE("CX23415/CX23416 MPEG2 encoder");
@@ -137,7 +142,7 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
struct ivtv *itv = to_ivtv(v4l2_dev);
struct snd_card *sc = NULL;
struct snd_ivtv_card *itvsc;
- int ret;
+ int ret, idx;
/* Numbered steps from "Writing an ALSA Driver" by Takashi Iwai */
@@ -145,8 +150,10 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
/* This is a no-op for us. We'll use the itv->instance */
/* (2) Create a card instance */
+ /* use first available id if not specified otherwise */
+ idx = index[itv->instance] == -1 ? SNDRV_DEFAULT_IDX1 : index[itv->instance];
ret = snd_card_new(&itv->pdev->dev,
- SNDRV_DEFAULT_IDX1, /* use first available id */
+ idx,
SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
THIS_MODULE, 0, &sc);
if (ret) {
@@ -196,6 +203,9 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
goto err_exit_free;
}
+ IVTV_ALSA_INFO("%s: Instance %d registered as ALSA card %d\n",
+ __func__, itv->instance, sc->number);
+
return 0;
err_exit_free:
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2ad65eb29832..2b8e7b2f2b86 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -75,15 +75,15 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
/* Get user pages for DMA Xfer */
- down_read(&current->mm->mmap_sem);
- y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
+ y_pages = get_user_pages_unlocked(current, current->mm,
+ y_dma.uaddr, y_dma.page_count, 0, 1,
+ &dma->map[0]);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
- uv_pages = get_user_pages(current, current->mm,
- uv_dma.uaddr, uv_dma.page_count, 0, 1,
- &dma->map[y_pages], NULL);
+ uv_pages = get_user_pages_unlocked(current, current->mm,
+ uv_dma.uaddr, uv_dma.page_count, 0, 1,
+ &dma->map[y_pages]);
}
- up_read(&current->mm->mmap_sem);
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
int rc = -EFAULT;
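
The ivtv hunk above can drop the explicit mmap_sem acquisition because get_user_pages_unlocked() takes and releases the semaphore itself. A minimal hedged sketch (my_pin_user_pages is a hypothetical helper; the argument list matches the call used in this hunk):

	static long my_pin_user_pages(unsigned long uaddr, unsigned long nr_pages,
				      struct page **pages)
	{
		/*
		 * No down_read(&current->mm->mmap_sem)/up_read() pair is needed:
		 * get_user_pages_unlocked() handles the locking internally.
		 * write = 0, force = 1, as in the DMA setup above; returns the
		 * number of pages pinned or a negative errno.
		 */
		return get_user_pages_unlocked(current, current->mm, uaddr,
					       nr_pages, 0, 1, pages);
	}
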
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb.h b/drivers/media/pci/netup_unidvb/netup_unidvb.h
index fa951102d7fb..a67b28111905 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb.h
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb.h
@@ -54,7 +54,7 @@ struct netup_dma {
u8 num;
spinlock_t lock;
struct netup_unidvb_dev *ndev;
- struct netup_dma_regs *regs;
+ struct netup_dma_regs __iomem *regs;
u32 ring_buffer_size;
u8 *addr_virt;
dma_addr_t addr_phys;
@@ -82,7 +82,7 @@ struct netup_i2c {
wait_queue_head_t wq;
struct i2c_adapter adap;
struct netup_unidvb_dev *dev;
- struct netup_i2c_regs *regs;
+ struct netup_i2c_regs __iomem *regs;
struct i2c_msg *msg;
enum netup_i2c_state state;
u32 xmit_size;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
index 751b51b03593..f46ffac66ee9 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
@@ -147,7 +147,7 @@ static int netup_unidvb_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
{
struct netup_ci_state *state = en50221->data;
struct netup_unidvb_dev *dev = state->dev;
- u8 val = state->membase8_config[addr];
+ u8 val = *((u8 __force *)state->membase8_io + addr);
dev_dbg(&dev->pci_dev->dev,
"%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
@@ -162,7 +162,7 @@ static int netup_unidvb_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
dev_dbg(&dev->pci_dev->dev,
"%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
- state->membase8_config[addr] = data;
+ *((u8 __force *)state->membase8_io + addr) = data;
return 0;
}
@@ -171,7 +171,7 @@ static int netup_unidvb_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
{
struct netup_ci_state *state = en50221->data;
struct netup_unidvb_dev *dev = state->dev;
- u8 val = state->membase8_io[addr];
+ u8 val = *((u8 __force *)state->membase8_io + addr);
dev_dbg(&dev->pci_dev->dev,
"%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
@@ -186,7 +186,7 @@ static int netup_unidvb_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221,
dev_dbg(&dev->pci_dev->dev,
"%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
- state->membase8_io[addr] = data;
+ *((u8 __force *)state->membase8_io + addr) = data;
return 0;
}
@@ -226,7 +226,7 @@ int netup_unidvb_ci_register(struct netup_unidvb_dev *dev,
__func__, result);
return result;
}
- writew(NETUP_UNIDVB_IRQ_CI, (u16 *)(dev->bmmio0 + REG_IMASK_SET));
+ writew(NETUP_UNIDVB_IRQ_CI, dev->bmmio0 + REG_IMASK_SET);
dev_info(&pci_dev->dev,
"%s(): CI adapter %d init done\n", __func__, num);
return 0;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 6d8bf6277647..83c90d3462e9 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include "netup_unidvb.h"
@@ -110,7 +111,7 @@ struct netup_dma_regs {
} __packed __aligned(1);
struct netup_unidvb_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
u32 size;
};
@@ -189,12 +190,10 @@ static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
if (enable) {
writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
- writew(irq_mask,
- (u16 *)(dma->ndev->bmmio0 + REG_IMASK_SET));
+ writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
} else {
writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
- writew(irq_mask,
- (u16 *)(dma->ndev->bmmio0 + REG_IMASK_CLEAR));
+ writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
}
}
@@ -278,7 +277,7 @@ static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
}
static int netup_unidvb_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers,
unsigned int *nplanes,
unsigned int sizes[],
@@ -300,7 +299,8 @@ static int netup_unidvb_queue_setup(struct vb2_queue *vq,
static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
- struct netup_unidvb_buffer *buf = container_of(vb,
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct netup_unidvb_buffer *buf = container_of(vbuf,
struct netup_unidvb_buffer, vb);
dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
@@ -312,7 +312,8 @@ static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
unsigned long flags;
struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
- struct netup_unidvb_buffer *buf = container_of(vb,
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct netup_unidvb_buffer *buf = container_of(vbuf,
struct netup_unidvb_buffer, vb);
dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
@@ -509,7 +510,7 @@ static int netup_unidvb_ring_copy(struct netup_dma *dma,
{
u32 copy_bytes, ring_bytes;
u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
- u8 *p = vb2_plane_vaddr(&buf->vb, 0);
+ u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
struct netup_unidvb_dev *ndev = dma->ndev;
if (p == NULL) {
@@ -522,7 +523,7 @@ static int netup_unidvb_ring_copy(struct netup_dma *dma,
ring_bytes = dma->ring_buffer_size - dma->data_offset;
copy_bytes = (ring_bytes > buff_bytes) ?
buff_bytes : ring_bytes;
- memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
+ memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
p += copy_bytes;
buf->size += copy_bytes;
buff_bytes -= copy_bytes;
@@ -535,7 +536,7 @@ static int netup_unidvb_ring_copy(struct netup_dma *dma,
ring_bytes = dma->data_size;
copy_bytes = (ring_bytes > buff_bytes) ?
buff_bytes : ring_bytes;
- memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
+ memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
buf->size += copy_bytes;
dma->data_size -= copy_bytes;
dma->data_offset += copy_bytes;
@@ -579,9 +580,9 @@ static void netup_unidvb_dma_worker(struct work_struct *work)
dev_dbg(&ndev->pci_dev->dev,
"%s(): buffer %p done, size %d\n",
__func__, buf, buf->size);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- vb2_set_plane_payload(&buf->vb, 0, buf->size);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
}
work_done:
@@ -599,7 +600,7 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
buf = list_first_entry(&dma->free_buffers,
struct netup_unidvb_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dma->lock, flags);
}
@@ -641,10 +642,10 @@ static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
__func__, num, dma->addr_virt,
(unsigned long long)dma->addr_phys,
dma->ring_buffer_size);
- memset_io(dma->addr_virt, 0, dma->ring_buffer_size);
+ memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
dma->addr_last = dma->addr_phys;
dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
- dma->regs = (struct netup_dma_regs *)(num == 0 ?
+ dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
ndev->bmmio0 + NETUP_DMA0_ADDR :
ndev->bmmio0 + NETUP_DMA1_ADDR);
writel((NETUP_DMA_BLOCKS_COUNT << 24) |
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index eaaa2d0a5fba..c09c52bc6eab 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -320,7 +320,7 @@ static int netup_i2c_init(struct netup_unidvb_dev *ndev, int bus_num)
i2c = &ndev->i2c[bus_num];
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wq);
- i2c->regs = (struct netup_i2c_regs *)(ndev->bmmio0 +
+ i2c->regs = (struct netup_i2c_regs __iomem *)(ndev->bmmio0 +
(bus_num == 0 ? NETUP_I2C_BUS0_ADDR : NETUP_I2C_BUS1_ADDR));
netup_i2c_reset(i2c);
i2c->adap = netup_i2c_adapter;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
index 56773f3893d4..f33c0de3e849 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -45,7 +45,7 @@ struct netup_spi_regs {
struct netup_spi {
struct device *dev;
struct spi_master *master;
- struct netup_spi_regs *regs;
+ struct netup_spi_regs __iomem *regs;
u8 __iomem *mmio;
spinlock_t lock;
wait_queue_head_t waitq;
@@ -200,7 +200,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
spin_lock_init(&nspi->lock);
init_waitqueue_head(&nspi->waitq);
nspi->master = master;
- nspi->regs = (struct netup_spi_regs *)(ndev->bmmio0 + 0x4000);
+ nspi->regs = (struct netup_spi_regs __iomem *)(ndev->bmmio0 + 0x4000);
writew(2, &nspi->regs->clock_divider);
writew(NETUP_UNIDVB_IRQ_SPI, ndev->bmmio0 + REG_IMASK_SET);
ndev->spi = nspi;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index c7405766609c..29d2094c42a0 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5884,6 +5884,42 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE1,
},
},
+ [SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM] = {
+ .name = "Leadtek Winfast TV2100 FM",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_TNF_5335MF,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x0d,
+ .inputs = {{
+ .name = name_tv_mono,
+ .vmux = 1,
+ .amux = LINE1,
+ .gpio = 0x00,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE2,
+ .gpio = 0x08,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE2,
+ .gpio = 0x08,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE1,
+ .gpio = 0x04,
+ },
+ .mute = {
+ .name = name_mute,
+ .amux = LINE1,
+ .gpio = 0x08,
+ },
+ },
};
@@ -7149,6 +7185,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0xa10a,
.driver_data = SAA7134_BOARD_AVERMEDIA_505,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
+ .subvendor = 0x107d,
+ .subdevice = 0x6f3a,
+ .driver_data = SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -7545,6 +7587,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
case SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM:
case SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S:
+ case SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM:
dev->has_remote = SAA7134_REMOTE_GPIO;
break;
case SAA7134_BOARD_FLYDVBS_LR300:
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 72d7f992375e..87f39f97a79f 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -216,13 +216,14 @@ int saa7134_buffer_count(unsigned int size, unsigned int count)
int saa7134_buffer_startpage(struct saa7134_buf *buf)
{
- return saa7134_buffer_pages(vb2_plane_size(&buf->vb2, 0)) * buf->vb2.v4l2_buf.index;
+ return saa7134_buffer_pages(vb2_plane_size(&buf->vb2.vb2_buf, 0))
+ * buf->vb2.vb2_buf.index;
}
unsigned long saa7134_buffer_base(struct saa7134_buf *buf)
{
unsigned long base;
- struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
+ struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2.vb2_buf, 0);
base = saa7134_buffer_startpage(buf) * 4096;
base += dma->sgl[0].offset;
@@ -308,9 +309,9 @@ void saa7134_buffer_finish(struct saa7134_dev *dev,
core_dbg("buffer_finish %p\n", q->curr);
/* finish current buffer */
- v4l2_get_timestamp(&q->curr->vb2.v4l2_buf.timestamp);
- q->curr->vb2.v4l2_buf.sequence = q->seq_nr++;
- vb2_buffer_done(&q->curr->vb2, state);
+ v4l2_get_timestamp(&q->curr->vb2.timestamp);
+ q->curr->vb2.sequence = q->seq_nr++;
+ vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
q->curr = NULL;
}
@@ -375,7 +376,8 @@ void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
if (!list_empty(&q->queue)) {
list_for_each_safe(pos, n, &q->queue) {
tmp = list_entry(pos, struct saa7134_buf, entry);
- vb2_buffer_done(&tmp->vb2, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&tmp->vb2.vb2_buf,
+ VB2_BUF_STATE_ERROR);
list_del(pos);
tmp = NULL;
}
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 11a172000291..69d32d3fa32c 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -835,6 +835,13 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0xffff;
raw_decode = true;
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM:
+ ir_codes = RC_MAP_LEADTEK_Y04G0051;
+ mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = true;
+ break;
}
if (NULL == ir_codes) {
pr_err("Oops: IR config error [card=%d]\n", dev->board);
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 4b202fa5fbc4..7fb5ee7e20ac 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -79,8 +79,9 @@ static int buffer_activate(struct saa7134_dev *dev,
int saa7134_ts_buffer_init(struct vb2_buffer *vb2)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
dmaq->curr = NULL;
buf->activate = buffer_activate;
@@ -91,9 +92,10 @@ EXPORT_SYMBOL_GPL(saa7134_ts_buffer_init);
int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
unsigned int lines, llength, size;
@@ -107,14 +109,14 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
return -EINVAL;
vb2_set_plane_payload(vb2, 0, size);
- vb2->v4l2_buf.field = dev->field;
+ vbuf->field = dev->field;
return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
saa7134_buffer_startpage(buf));
}
EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare);
-int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+int saa7134_ts_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -148,10 +150,12 @@ int saa7134_ts_start_streaming(struct vb2_queue *vq, unsigned int count)
list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
list_del(&buf->entry);
- vb2_buffer_done(&buf->vb2, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb2.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
if (dmaq->curr) {
- vb2_buffer_done(&dmaq->curr->vb2, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
dmaq->curr = NULL;
}
return -EBUSY;
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 4d36586ad752..6271b0eb0265 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -83,7 +83,7 @@ static int buffer_activate(struct saa7134_dev *dev,
struct saa7134_buf *buf,
struct saa7134_buf *next)
{
- struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_queue->drv_priv;
+ struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
unsigned long control, base;
vbi_dbg("buffer_activate [%p]\n", buf);
@@ -119,8 +119,9 @@ static int buffer_prepare(struct vb2_buffer *vb2)
{
struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
- struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
+ struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
unsigned int size;
if (dma->sgl->offset) {
@@ -137,7 +138,7 @@ static int buffer_prepare(struct vb2_buffer *vb2)
saa7134_buffer_startpage(buf));
}
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -161,7 +162,8 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
static int buffer_init(struct vb2_buffer *vb2)
{
struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
dmaq->curr = NULL;
buf->activate = buffer_activate;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 035039cfae6d..518086c7aed5 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -791,7 +791,7 @@ static int buffer_activate(struct saa7134_dev *dev,
struct saa7134_buf *buf,
struct saa7134_buf *next)
{
- struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_queue->drv_priv;
+ struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
unsigned long base,control,bpl;
unsigned long bpl_uv,lines_uv,base2,base3,tmp; /* planar */
@@ -872,7 +872,8 @@ static int buffer_activate(struct saa7134_dev *dev,
static int buffer_init(struct vb2_buffer *vb2)
{
struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
dmaq->curr = NULL;
buf->activate = buffer_activate;
@@ -883,8 +884,9 @@ static int buffer_prepare(struct vb2_buffer *vb2)
{
struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
- struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
+ struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
unsigned int size;
if (dma->sgl->offset) {
@@ -896,13 +898,13 @@ static int buffer_prepare(struct vb2_buffer *vb2)
return -EINVAL;
vb2_set_plane_payload(vb2, 0, size);
- vb2->v4l2_buf.field = dev->field;
+ vbuf->field = dev->field;
return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
saa7134_buffer_startpage(buf));
}
-static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -932,7 +934,8 @@ void saa7134_vb2_buffer_queue(struct vb2_buffer *vb)
{
struct saa7134_dmaqueue *dmaq = vb->vb2_queue->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb, struct saa7134_buf, vb2);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
saa7134_buffer_queue(dev, dmaq, buf);
}
@@ -953,10 +956,12 @@ int saa7134_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
list_del(&buf->entry);
- vb2_buffer_done(&buf->vb2, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb2.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
if (dmaq->curr) {
- vb2_buffer_done(&dmaq->curr->vb2, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
dmaq->curr = NULL;
}
return -EBUSY;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 6b5f6f45d285..6b6d234f5cab 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -342,6 +342,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_AVERMEDIA_A706 192
#define SAA7134_BOARD_WIS_VOYAGER 193
#define SAA7134_BOARD_AVERMEDIA_505 194
+#define SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM 195
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -459,7 +460,7 @@ struct saa7134_thread {
/* buffer for one video/vbi/ts frame */
struct saa7134_buf {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb2;
+ struct vb2_v4l2_buffer vb2;
/* saa7134 specific */
unsigned int top_seen;
@@ -819,7 +820,7 @@ void saa7134_video_fini(struct saa7134_dev *dev);
int saa7134_ts_buffer_init(struct vb2_buffer *vb2);
int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2);
-int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+int saa7134_ts_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[]);
int saa7134_ts_start_streaming(struct vb2_queue *vq, unsigned int count);
diff --git a/drivers/media/pci/saa7164/Kconfig b/drivers/media/pci/saa7164/Kconfig
index a53db7d1c96e..9098ef5feca4 100644
--- a/drivers/media/pci/saa7164/Kconfig
+++ b/drivers/media/pci/saa7164/Kconfig
@@ -5,7 +5,6 @@ config VIDEO_SAA7164
select FW_LOADER
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_DVB
select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 4434e0f28c26..1b184c39ba97 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -25,6 +25,18 @@
#define ENCODER_MIN_BITRATE 1000000
#define ENCODER_DEF_BITRATE 5000000
+/*
+ * This is a dummy non-zero value for the sizeimage field of v4l2_pix_format.
+ * It is not actually used for anything since this driver does not support
+ * stream I/O, only read(), and because this driver produces an MPEG stream
+ * and not discrete frames. But the V4L2 spec doesn't allow for this value
+ * to be 0, so set it to 0x10000 instead.
+ *
+ * If we ever change this driver to support stream I/O, then this field
+ * will be the size of the streaming buffers.
+ */
+#define SAA7164_SIZEIMAGE (0x10000)
+
static struct saa7164_tvnorm saa7164_tvnorms[] = {
{
.name = "NTSC-M",
@@ -35,24 +47,6 @@ static struct saa7164_tvnorm saa7164_tvnorms[] = {
}
};
-static const u32 saa7164_v4l2_ctrls[] = {
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_SHARPNESS,
- V4L2_CID_MPEG_STREAM_TYPE,
- V4L2_CID_MPEG_VIDEO_ASPECT,
- V4L2_CID_MPEG_VIDEO_B_FRAMES,
- V4L2_CID_MPEG_VIDEO_GOP_SIZE,
- V4L2_CID_MPEG_AUDIO_MUTE,
- V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
- V4L2_CID_MPEG_VIDEO_BITRATE,
- V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
- 0
-};
-
/* Take the encoder configuration from the port struct and
* flush it to the hardware.
*/
@@ -211,10 +205,8 @@ static int saa7164_encoder_initialize(struct saa7164_port *port)
}
/* -- V4L2 --------------------------------------------------------- */
-static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
+int saa7164_s_std(struct saa7164_port *port, v4l2_std_id id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
unsigned int i;
@@ -240,22 +232,33 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
return 0;
}
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
+ return saa7164_s_std(fh->port, id);
+}
+
+int saa7164_g_std(struct saa7164_port *port, v4l2_std_id *id)
+{
*id = port->std;
return 0;
}
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- int n;
+ struct saa7164_encoder_fh *fh = file->private_data;
+
+ return saa7164_g_std(fh->port, id);
+}
- char *inputs[] = { "tuner", "composite", "svideo", "aux",
- "composite 2", "svideo 2", "aux 2" };
+int saa7164_enum_input(struct file *file, void *priv, struct v4l2_input *i)
+{
+ static const char * const inputs[] = {
+ "tuner", "composite", "svideo", "aux",
+ "composite 2", "svideo 2", "aux 2"
+ };
+ int n;
if (i->index >= 7)
return -EINVAL;
@@ -273,10 +276,8 @@ static int vidioc_enum_input(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+int saa7164_g_input(struct saa7164_port *port, unsigned int *i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
if (saa7164_api_get_videomux(port) != SAA_OK)
@@ -289,10 +290,15 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
+
+ return saa7164_g_input(fh->port, i);
+}
+
+int saa7164_s_input(struct saa7164_port *port, unsigned int i)
+{
struct saa7164_dev *dev = port->dev;
dprintk(DBGLVL_ENC, "%s() input=%d\n", __func__, i);
@@ -308,8 +314,14 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+
+ return saa7164_s_input(fh->port, i);
+}
+
+int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct saa7164_encoder_fh *fh = file->private_data;
struct saa7164_port *port = fh->port;
@@ -319,38 +331,45 @@ static int vidioc_g_tuner(struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "tuner");
- t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
+ t->rangelow = SAA7164_TV_MIN_FREQ;
+ t->rangehigh = SAA7164_TV_MAX_FREQ;
dprintk(DBGLVL_ENC, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
return 0;
}
-static int vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *t)
+int saa7164_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
+ if (0 != t->index)
+ return -EINVAL;
+
/* Update the A/V core */
return 0;
}
-static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
+int saa7164_g_frequency(struct saa7164_port *port, struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
+ if (f->tuner)
+ return -EINVAL;
- f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = port->freq;
-
return 0;
}
-static int vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
+
+ return saa7164_g_frequency(fh->port, f);
+}
+
+int saa7164_s_frequency(struct saa7164_port *port,
+ const struct v4l2_frequency *f)
+{
struct saa7164_dev *dev = port->dev;
struct saa7164_port *tsport;
struct dvb_frontend *fe;
@@ -370,16 +389,13 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (f->tuner != 0)
return -EINVAL;
- if (f->type != V4L2_TUNER_ANALOG_TV)
- return -EINVAL;
-
- port->freq = f->frequency;
+ port->freq = clamp(f->frequency,
+ SAA7164_TV_MIN_FREQ, SAA7164_TV_MAX_FREQ);
/* Update the hardware */
if (port->nr == SAA7164_PORT_ENC1)
tsport = &dev->ports[SAA7164_PORT_TS1];
- else
- if (port->nr == SAA7164_PORT_ENC2)
+ else if (port->nr == SAA7164_PORT_ENC2)
tsport = &dev->ports[SAA7164_PORT_TS2];
else
BUG();
@@ -396,253 +412,54 @@ static int vidioc_s_frequency(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
{
struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- dprintk(DBGLVL_ENC, "%s(id=%d, value=%d)\n", __func__,
- ctl->id, ctl->value);
-
- switch (ctl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctl->value = port->ctl_brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctl->value = port->ctl_contrast;
- break;
- case V4L2_CID_SATURATION:
- ctl->value = port->ctl_saturation;
- break;
- case V4L2_CID_HUE:
- ctl->value = port->ctl_hue;
- break;
- case V4L2_CID_SHARPNESS:
- ctl->value = port->ctl_sharpness;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- ctl->value = port->ctl_volume;
- break;
- default:
- return -EINVAL;
- }
- return 0;
+ return saa7164_s_frequency(fh->port, f);
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
+static int saa7164_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *port =
+ container_of(ctrl->handler, struct saa7164_port, ctrl_handler);
+ struct saa7164_encoder_params *params = &port->encoder_params;
int ret = 0;
- dprintk(DBGLVL_ENC, "%s(id=%d, value=%d)\n", __func__,
- ctl->id, ctl->value);
-
- switch (ctl->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_brightness = ctl->value;
- saa7164_api_set_usercontrol(port,
- PU_BRIGHTNESS_CONTROL);
- } else
- ret = -EINVAL;
+ port->ctl_brightness = ctrl->val;
+ saa7164_api_set_usercontrol(port, PU_BRIGHTNESS_CONTROL);
break;
case V4L2_CID_CONTRAST:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_contrast = ctl->value;
- saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
- } else
- ret = -EINVAL;
+ port->ctl_contrast = ctrl->val;
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
break;
case V4L2_CID_SATURATION:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_saturation = ctl->value;
- saa7164_api_set_usercontrol(port,
- PU_SATURATION_CONTROL);
- } else
- ret = -EINVAL;
+ port->ctl_saturation = ctrl->val;
+ saa7164_api_set_usercontrol(port, PU_SATURATION_CONTROL);
break;
case V4L2_CID_HUE:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_hue = ctl->value;
- saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
- } else
- ret = -EINVAL;
+ port->ctl_hue = ctrl->val;
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
break;
case V4L2_CID_SHARPNESS:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_sharpness = ctl->value;
- saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
- } else
- ret = -EINVAL;
+ port->ctl_sharpness = ctrl->val;
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
break;
case V4L2_CID_AUDIO_VOLUME:
- if ((ctl->value >= -83) && (ctl->value <= 24)) {
- port->ctl_volume = ctl->value;
- saa7164_api_set_audio_volume(port, port->ctl_volume);
- } else
- ret = -EINVAL;
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int saa7164_get_ctrl(struct saa7164_port *port,
- struct v4l2_ext_control *ctrl)
-{
- struct saa7164_encoder_params *params = &port->encoder_params;
-
- switch (ctrl->id) {
- case V4L2_CID_MPEG_VIDEO_BITRATE:
- ctrl->value = params->bitrate;
- break;
- case V4L2_CID_MPEG_STREAM_TYPE:
- ctrl->value = params->stream_type;
- break;
- case V4L2_CID_MPEG_AUDIO_MUTE:
- ctrl->value = params->ctl_mute;
- break;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- ctrl->value = params->ctl_aspect;
- break;
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- ctrl->value = params->bitrate_mode;
- break;
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- ctrl->value = params->refdist;
- break;
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
- ctrl->value = params->bitrate_peak;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- ctrl->value = params->gop_size;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vidioc_g_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- int i, err = 0;
-
- if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- for (i = 0; i < ctrls->count; i++) {
- struct v4l2_ext_control *ctrl = ctrls->controls + i;
-
- err = saa7164_get_ctrl(port, ctrl);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- }
- return err;
-
- }
-
- return -EINVAL;
-}
-
-static int saa7164_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
-{
- int ret = -EINVAL;
-
- switch (ctrl->id) {
- case V4L2_CID_MPEG_VIDEO_BITRATE:
- if ((ctrl->value >= ENCODER_MIN_BITRATE) &&
- (ctrl->value <= ENCODER_MAX_BITRATE))
- ret = 0;
+ port->ctl_volume = ctrl->val;
+ saa7164_api_set_audio_volume(port, port->ctl_volume);
break;
- case V4L2_CID_MPEG_STREAM_TYPE:
- if ((ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) ||
- (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS))
- ret = 0;
- break;
- case V4L2_CID_MPEG_AUDIO_MUTE:
- if ((ctrl->value >= 0) &&
- (ctrl->value <= 1))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- if ((ctrl->value >= V4L2_MPEG_VIDEO_ASPECT_1x1) &&
- (ctrl->value <= V4L2_MPEG_VIDEO_ASPECT_221x100))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- if ((ctrl->value >= 0) &&
- (ctrl->value <= 255))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- if ((ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) ||
- (ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- if ((ctrl->value >= 1) &&
- (ctrl->value <= 3))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
- if ((ctrl->value >= ENCODER_MIN_BITRATE) &&
- (ctrl->value <= ENCODER_MAX_BITRATE))
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int vidioc_try_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- int i, err = 0;
-
- if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- for (i = 0; i < ctrls->count; i++) {
- struct v4l2_ext_control *ctrl = ctrls->controls + i;
-
- err = saa7164_try_ctrl(ctrl, 0);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- }
- return err;
- }
-
- return -EINVAL;
-}
-
-static int saa7164_set_ctrl(struct saa7164_port *port,
- struct v4l2_ext_control *ctrl)
-{
- struct saa7164_encoder_params *params = &port->encoder_params;
- int ret = 0;
-
- switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_BITRATE:
- params->bitrate = ctrl->value;
+ params->bitrate = ctrl->val;
break;
case V4L2_CID_MPEG_STREAM_TYPE:
- params->stream_type = ctrl->value;
+ params->stream_type = ctrl->val;
break;
case V4L2_CID_MPEG_AUDIO_MUTE:
- params->ctl_mute = ctrl->value;
+ params->ctl_mute = ctrl->val;
ret = saa7164_api_audio_mute(port, params->ctl_mute);
if (ret != SAA_OK) {
printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
@@ -651,7 +468,7 @@ static int saa7164_set_ctrl(struct saa7164_port *port,
}
break;
case V4L2_CID_MPEG_VIDEO_ASPECT:
- params->ctl_aspect = ctrl->value;
+ params->ctl_aspect = ctrl->val;
ret = saa7164_api_set_aspect_ratio(port);
if (ret != SAA_OK) {
printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
@@ -660,55 +477,24 @@ static int saa7164_set_ctrl(struct saa7164_port *port,
}
break;
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- params->bitrate_mode = ctrl->value;
+ params->bitrate_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- params->refdist = ctrl->value;
+ params->refdist = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
- params->bitrate_peak = ctrl->value;
+ params->bitrate_peak = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- params->gop_size = ctrl->value;
+ params->gop_size = ctrl->val;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- /* TODO: Update the hardware */
-
return ret;
}
-static int vidioc_s_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- int i, err = 0;
-
- if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- for (i = 0; i < ctrls->count; i++) {
- struct v4l2_ext_control *ctrl = ctrls->controls + i;
-
- err = saa7164_try_ctrl(ctrl, 0);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- err = saa7164_set_ctrl(port, ctrl);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- }
- return err;
-
- }
-
- return -EINVAL;
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -745,145 +531,22 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+static int vidioc_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct saa7164_encoder_fh *fh = file->private_data;
struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage =
- port->ts_packet_size * port->ts_packet_count;
- f->fmt.pix.colorspace = 0;
+ f->fmt.pix.sizeimage = SAA7164_SIZEIMAGE;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.width = port->width;
f->fmt.pix.height = port->height;
-
- dprintk(DBGLVL_ENC, "VIDIOC_G_FMT: w: %d, h: %d\n",
- port->width, port->height);
-
- return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage =
- port->ts_packet_size * port->ts_packet_count;
- f->fmt.pix.colorspace = 0;
- dprintk(DBGLVL_ENC, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
- port->width, port->height);
return 0;
}
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage =
- port->ts_packet_size * port->ts_packet_count;
- f->fmt.pix.colorspace = 0;
-
- dprintk(DBGLVL_ENC, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
- f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
-
- return 0;
-}
-
-static int fill_queryctrl(struct saa7164_encoder_params *params,
- struct v4l2_queryctrl *c)
-{
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 127);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 66);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 62);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 128);
- case V4L2_CID_SHARPNESS:
- return v4l2_ctrl_query_fill(c, 0x0, 0x0f, 1, 8);
- case V4L2_CID_MPEG_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(c, 0x0, 0x01, 1, 0);
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(c, -83, 24, 1, 20);
- case V4L2_CID_MPEG_VIDEO_BITRATE:
- return v4l2_ctrl_query_fill(c,
- ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
- 100000, ENCODER_DEF_BITRATE);
- case V4L2_CID_MPEG_STREAM_TYPE:
- return v4l2_ctrl_query_fill(c,
- V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
- V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
- 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- return v4l2_ctrl_query_fill(c,
- V4L2_MPEG_VIDEO_ASPECT_1x1,
- V4L2_MPEG_VIDEO_ASPECT_221x100,
- 1, V4L2_MPEG_VIDEO_ASPECT_4x3);
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- return v4l2_ctrl_query_fill(c, 1, 255, 1, 15);
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- return v4l2_ctrl_query_fill(c,
- V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
- V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
- 1, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- return v4l2_ctrl_query_fill(c,
- 1, 3, 1, 1);
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
- return v4l2_ctrl_query_fill(c,
- ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
- 100000, ENCODER_DEF_BITRATE);
- default:
- return -EINVAL;
- }
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- struct saa7164_encoder_fh *fh = priv;
- struct saa7164_port *port = fh->port;
- int i, next;
- u32 id = c->id;
-
- memset(c, 0, sizeof(*c));
-
- next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL);
- c->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL;
-
- for (i = 0; i < ARRAY_SIZE(saa7164_v4l2_ctrls); i++) {
- if (next) {
- if (c->id < saa7164_v4l2_ctrls[i])
- c->id = saa7164_v4l2_ctrls[i];
- else
- continue;
- }
-
- if (c->id == saa7164_v4l2_ctrls[i])
- return fill_queryctrl(&port->encoder_params, c);
-
- if (c->id < saa7164_v4l2_ctrls[i])
- break;
- }
-
- return -EINVAL;
-}
-
static int saa7164_encoder_stop_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
@@ -1084,8 +747,10 @@ static int fops_open(struct file *file)
if (NULL == fh)
return -ENOMEM;
- file->private_data = fh;
fh->port = port;
+ v4l2_fh_init(&fh->fh, video_devdata(file));
+ v4l2_fh_add(&fh->fh);
+ file->private_data = fh;
return 0;
}
@@ -1106,7 +771,8 @@ static int fops_release(struct file *file)
}
}
- file->private_data = NULL;
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
kfree(fh);
return 0;
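The two hunks above move the encoder file handle onto the V4L2 file-handle helpers. For reference, this is roughly the lifecycle being adopted (a minimal sketch; my_fh, my_open and my_release are illustrative names, not part of this patch; the v4l2_fh member sits first so the core can reach it through file->private_data):

#include <linux/fs.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct my_fh {
	struct v4l2_fh fh;		/* keep as the first member */
	/* per-open driver state follows */
};

static int my_open(struct file *file)
{
	struct my_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (!fh)
		return -ENOMEM;
	v4l2_fh_init(&fh->fh, video_devdata(file));	/* bind to the video_device */
	v4l2_fh_add(&fh->fh);				/* register for events */
	file->private_data = fh;
	return 0;
}

static int my_release(struct file *file)
{
	struct my_fh *fh = file->private_data;

	v4l2_fh_del(&fh->fh);		/* reverse v4l2_fh_add() */
	v4l2_fh_exit(&fh->fh);		/* reverse v4l2_fh_init() */
	kfree(fh);
	return 0;
}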
@@ -1250,10 +916,11 @@ err:
static unsigned int fops_poll(struct file *file, poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = 0;
+ unsigned int mask = v4l2_ctrl_poll(file, wait);
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
@@ -1263,26 +930,18 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
saa7164_histogram_update(&port->poll_interval,
port->last_poll_msecs_diff);
- if (!video_is_registered(port->v4l_device))
- return -EIO;
+ if (!(req_events & (POLLIN | POLLRDNORM)))
+ return mask;
if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
if (atomic_inc_return(&port->v4l_reader_count) == 1) {
if (saa7164_encoder_initialize(port) < 0)
- return -EINVAL;
+ return mask | POLLERR;
saa7164_encoder_start_streaming(port);
msleep(200);
}
}
- /* blocking wait for buffer */
- if ((file->f_flags & O_NONBLOCK) == 0) {
- if (wait_event_interruptible(port->wait_read,
- saa7164_enc_next_buf(port))) {
- return -ERESTARTSYS;
- }
- }
-
/* Pull the first buffer from the used list */
if (!list_empty(&port->list_buf_used.list))
mask |= POLLIN | POLLRDNORM;
@@ -1290,6 +949,10 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
return mask;
}
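The reworked poll handler above leans on two core helpers: poll_requested_events() to learn what the caller is actually waiting for, and v4l2_ctrl_poll() to merge in pending control events. A minimal sketch of that shape (my_poll is an illustrative name):

#include <linux/poll.h>
#include <media/v4l2-ctrls.h>

static unsigned int my_poll(struct file *file, poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	unsigned int mask = v4l2_ctrl_poll(file, wait);	/* control/event readiness */

	if (!(req_events & (POLLIN | POLLRDNORM)))
		return mask;	/* caller only polls for events; skip capture setup */

	/* ... start streaming lazily, then add POLLIN | POLLRDNORM to mask
	 * once a filled capture buffer is available ... */
	return mask;
}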
+static const struct v4l2_ctrl_ops saa7164_ctrl_ops = {
+ .s_ctrl = saa7164_s_ctrl,
+};
+
static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
.open = fops_open,
@@ -1302,24 +965,21 @@ static const struct v4l2_file_operations mpeg_fops = {
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
- .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_enum_input = saa7164_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_tuner = saa7164_g_tuner,
+ .vidioc_s_tuner = saa7164_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
- .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
- .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_fmt_vid_cap = vidioc_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device saa7164_mpeg_template = {
@@ -1357,6 +1017,7 @@ static struct video_device *saa7164_encoder_alloc(
int saa7164_encoder_register(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
+ struct v4l2_ctrl_handler *hdl = &port->ctrl_handler;
int result = -ENODEV;
dprintk(DBGLVL_ENC, "%s()\n", __func__);
@@ -1381,19 +1042,52 @@ int saa7164_encoder_register(struct saa7164_port *port)
port->video_format = EU_VIDEO_FORMAT_MPEG_2;
port->audio_format = 0;
port->video_resolution = 0;
- port->ctl_brightness = 127;
- port->ctl_contrast = 66;
- port->ctl_hue = 128;
- port->ctl_saturation = 62;
- port->ctl_sharpness = 8;
- port->encoder_params.bitrate = ENCODER_DEF_BITRATE;
- port->encoder_params.bitrate_peak = ENCODER_DEF_BITRATE;
- port->encoder_params.bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
- port->encoder_params.stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS;
- port->encoder_params.ctl_mute = 0;
- port->encoder_params.ctl_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3;
- port->encoder_params.refdist = 1;
- port->encoder_params.gop_size = SAA7164_ENCODER_DEFAULT_GOP_SIZE;
+ port->freq = SAA7164_TV_MIN_FREQ;
+
+ v4l2_ctrl_handler_init(hdl, 14);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 66);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 62);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_HUE, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0x0, 0x0f, 1, 8);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_AUDIO_MUTE, 0x0, 0x01, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -83, 24, 1, 20);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
+ 100000, ENCODER_DEF_BITRATE);
+ v4l2_ctrl_new_std_menu(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_STREAM_TYPE,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_TS, 0,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
+ v4l2_ctrl_new_std_menu(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_MPEG_VIDEO_ASPECT_221x100, 0,
+ V4L2_MPEG_VIDEO_ASPECT_4x3);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 255, 1, 15);
+ v4l2_ctrl_new_std_menu(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES, 1, 3, 1, 1);
+ v4l2_ctrl_new_std(hdl, &saa7164_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
+ 100000, ENCODER_DEF_BITRATE);
+ if (hdl->error) {
+ result = hdl->error;
+ goto failed;
+ }
+
port->std = V4L2_STD_NTSC_M;
if (port->encodernorm.id & V4L2_STD_525_60)
@@ -1412,6 +1106,8 @@ int saa7164_encoder_register(struct saa7164_port *port)
goto failed;
}
+ port->v4l_device->ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
video_set_drvdata(port->v4l_device, port);
result = video_register_device(port->v4l_device,
VFL_TYPE_GRABBER, -1);
@@ -1466,6 +1162,7 @@ void saa7164_encoder_unregister(struct saa7164_port *port)
port->v4l_device = NULL;
}
+ v4l2_ctrl_handler_free(&port->ctrl_handler);
dprintk(DBGLVL_ENC, "%s(port=%d) done\n", __func__, port->nr);
}
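The encoder changes above replace the driver's hand-rolled queryctrl/ext_ctrls plumbing with the control framework: controls are declared once on a v4l2_ctrl_handler, the handler is attached to the video_device, and the core then services the control ioctls, calling back into .s_ctrl with the validated value. A minimal sketch of that registration pattern (the my_* names are illustrative, not from this patch):

#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>

static int my_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* invoked by the core with the validated value in ctrl->val */
	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		/* ... program the hardware ... */
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct v4l2_ctrl_ops my_ctrl_ops = {
	.s_ctrl = my_s_ctrl,
};

static int my_register_controls(struct v4l2_ctrl_handler *hdl,
				struct video_device *vdev)
{
	v4l2_ctrl_handler_init(hdl, 2);		/* hint: expected number of controls */
	v4l2_ctrl_new_std(hdl, &my_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
	v4l2_ctrl_new_std_menu(hdl, &my_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
			       V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
			       V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
	if (hdl->error) {
		int err = hdl->error;	/* failed v4l2_ctrl_new_*() calls latch here */

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	vdev->ctrl_handler = hdl;	/* the core now answers the control ioctls */
	return v4l2_ctrl_handler_setup(hdl);	/* push default values to the hardware */
}

The handler is released with v4l2_ctrl_handler_free() on teardown, matching the new call at the end of saa7164_encoder_unregister() above.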
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 859fd03d82f9..ee54491459a6 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -21,20 +21,6 @@
#include "saa7164.h"
-static struct saa7164_tvnorm saa7164_tvnorms[] = {
- {
- .name = "NTSC-M",
- .id = V4L2_STD_NTSC_M,
- }, {
- .name = "NTSC-JP",
- .id = V4L2_STD_NTSC_M_JP,
- }
-};
-
-static const u32 saa7164_v4l2_ctrls[] = {
- 0
-};
-
/* Take the encoder configuration from the port struct and
* flush it to the hardware.
*/
@@ -43,23 +29,13 @@ static void saa7164_vbi_configure(struct saa7164_port *port)
struct saa7164_dev *dev = port->dev;
dprintk(DBGLVL_VBI, "%s()\n", __func__);
- port->vbi_params.width = port->width;
- port->vbi_params.height = port->height;
+ port->vbi_params.width = port->enc_port->width;
+ port->vbi_params.height = port->enc_port->height;
port->vbi_params.is_50hz =
- (port->encodernorm.id & V4L2_STD_625_50) != 0;
+ (port->enc_port->encodernorm.id & V4L2_STD_625_50) != 0;
/* Set up the DIF (enable it) for analog mode by default */
saa7164_api_initialize_dif(port);
-
- /* Configure the correct video standard */
-#if 0
- saa7164_api_configure_dif(port, port->encodernorm.id);
-#endif
-
-#if 0
- /* Ensure the audio decoder is correct configured */
- saa7164_api_set_audio_std(port);
-#endif
dprintk(DBGLVL_VBI, "%s() ends\n", __func__);
}
@@ -186,468 +162,50 @@ static int saa7164_vbi_initialize(struct saa7164_port *port)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
- unsigned int i;
- dprintk(DBGLVL_VBI, "%s(id=0x%x)\n", __func__, (u32)id);
-
- for (i = 0; i < ARRAY_SIZE(saa7164_tvnorms); i++) {
- if (id & saa7164_tvnorms[i].id)
- break;
- }
- if (i == ARRAY_SIZE(saa7164_tvnorms))
- return -EINVAL;
-
- port->encodernorm = saa7164_tvnorms[i];
- port->std = id;
-
- /* Update the audio decoder while is not running in
- * auto detect mode.
- */
- saa7164_api_set_audio_std(port);
-
- dprintk(DBGLVL_VBI, "%s(id=0x%x) OK\n", __func__, (u32)id);
-
- return 0;
+ return saa7164_s_std(fh->port->enc_port, id);
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
struct saa7164_encoder_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- *id = port->std;
- return 0;
-}
-
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- int n;
-
- char *inputs[] = { "tuner", "composite", "svideo", "aux",
- "composite 2", "svideo 2", "aux 2" };
-
- if (i->index >= 7)
- return -EINVAL;
-
- strcpy(i->name, inputs[i->index]);
-
- if (i->index == 0)
- i->type = V4L2_INPUT_TYPE_TUNER;
- else
- i->type = V4L2_INPUT_TYPE_CAMERA;
-
- for (n = 0; n < ARRAY_SIZE(saa7164_tvnorms); n++)
- i->std |= saa7164_tvnorms[n].id;
-
- return 0;
+ return saa7164_g_std(fh->port->enc_port, id);
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- if (saa7164_api_get_videomux(port) != SAA_OK)
- return -EIO;
-
- *i = (port->mux_input - 1);
-
- dprintk(DBGLVL_VBI, "%s() input=%d\n", __func__, *i);
- return 0;
+ return saa7164_g_input(fh->port->enc_port, i);
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- dprintk(DBGLVL_VBI, "%s() input=%d\n", __func__, i);
-
- if (i >= 7)
- return -EINVAL;
-
- port->mux_input = i + 1;
- if (saa7164_api_set_videomux(port) != SAA_OK)
- return -EIO;
-
- return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *t)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- if (0 != t->index)
- return -EINVAL;
-
- strcpy(t->name, "tuner");
- t->type = V4L2_TUNER_ANALOG_TV;
- t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
-
- dprintk(DBGLVL_VBI, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
-
- return 0;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *t)
-{
- /* Update the A/V core */
- return 0;
+ return saa7164_s_input(fh->port->enc_port, i);
}
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- f->type = V4L2_TUNER_ANALOG_TV;
- f->frequency = port->freq;
-
- return 0;
+ return saa7164_g_frequency(fh->port->enc_port, f);
}
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
- struct saa7164_port *tsport;
- struct dvb_frontend *fe;
-
- /* TODO: Pull this for the std */
- struct analog_parameters params = {
- .mode = V4L2_TUNER_ANALOG_TV,
- .audmode = V4L2_TUNER_MODE_STEREO,
- .std = port->encodernorm.id,
- .frequency = f->frequency
- };
-
- /* Stop the encoder */
- dprintk(DBGLVL_VBI, "%s() frequency=%d tuner=%d\n", __func__,
- f->frequency, f->tuner);
-
- if (f->tuner != 0)
- return -EINVAL;
-
- if (f->type != V4L2_TUNER_ANALOG_TV)
- return -EINVAL;
-
- port->freq = f->frequency;
-
- /* Update the hardware */
- if (port->nr == SAA7164_PORT_VBI1)
- tsport = &dev->ports[SAA7164_PORT_TS1];
- else
- if (port->nr == SAA7164_PORT_VBI2)
- tsport = &dev->ports[SAA7164_PORT_TS2];
- else
- BUG();
-
- fe = tsport->dvb.frontend;
-
- if (fe && fe->ops.tuner_ops.set_analog_params)
- fe->ops.tuner_ops.set_analog_params(fe, &params);
- else
- printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
-
- saa7164_vbi_initialize(port);
-
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- dprintk(DBGLVL_VBI, "%s(id=%d, value=%d)\n", __func__,
- ctl->id, ctl->value);
-
- switch (ctl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctl->value = port->ctl_brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctl->value = port->ctl_contrast;
- break;
- case V4L2_CID_SATURATION:
- ctl->value = port->ctl_saturation;
- break;
- case V4L2_CID_HUE:
- ctl->value = port->ctl_hue;
- break;
- case V4L2_CID_SHARPNESS:
- ctl->value = port->ctl_sharpness;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- ctl->value = port->ctl_volume;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
- int ret = 0;
-
- dprintk(DBGLVL_VBI, "%s(id=%d, value=%d)\n", __func__,
- ctl->id, ctl->value);
-
- switch (ctl->id) {
- case V4L2_CID_BRIGHTNESS:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_brightness = ctl->value;
- saa7164_api_set_usercontrol(port,
- PU_BRIGHTNESS_CONTROL);
- } else
- ret = -EINVAL;
- break;
- case V4L2_CID_CONTRAST:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_contrast = ctl->value;
- saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
- } else
- ret = -EINVAL;
- break;
- case V4L2_CID_SATURATION:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_saturation = ctl->value;
- saa7164_api_set_usercontrol(port,
- PU_SATURATION_CONTROL);
- } else
- ret = -EINVAL;
- break;
- case V4L2_CID_HUE:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_hue = ctl->value;
- saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
- } else
- ret = -EINVAL;
- break;
- case V4L2_CID_SHARPNESS:
- if ((ctl->value >= 0) && (ctl->value <= 255)) {
- port->ctl_sharpness = ctl->value;
- saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
- } else
- ret = -EINVAL;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- if ((ctl->value >= -83) && (ctl->value <= 24)) {
- port->ctl_volume = ctl->value;
- saa7164_api_set_audio_volume(port, port->ctl_volume);
- } else
- ret = -EINVAL;
- break;
- default:
- ret = -EINVAL;
- }
+ int ret = saa7164_s_frequency(fh->port->enc_port, f);
+ if (ret == 0)
+ saa7164_vbi_initialize(fh->port);
return ret;
}
-static int saa7164_get_ctrl(struct saa7164_port *port,
- struct v4l2_ext_control *ctrl)
-{
- struct saa7164_vbi_params *params = &port->vbi_params;
-
- switch (ctrl->id) {
- case V4L2_CID_MPEG_STREAM_TYPE:
- ctrl->value = params->stream_type;
- break;
- case V4L2_CID_MPEG_AUDIO_MUTE:
- ctrl->value = params->ctl_mute;
- break;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- ctrl->value = params->ctl_aspect;
- break;
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- ctrl->value = params->refdist;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- ctrl->value = params->gop_size;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vidioc_g_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- int i, err = 0;
-
- if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- for (i = 0; i < ctrls->count; i++) {
- struct v4l2_ext_control *ctrl = ctrls->controls + i;
-
- err = saa7164_get_ctrl(port, ctrl);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- }
- return err;
-
- }
-
- return -EINVAL;
-}
-
-static int saa7164_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
-{
- int ret = -EINVAL;
-
- switch (ctrl->id) {
- case V4L2_CID_MPEG_STREAM_TYPE:
- if ((ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) ||
- (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS))
- ret = 0;
- break;
- case V4L2_CID_MPEG_AUDIO_MUTE:
- if ((ctrl->value >= 0) &&
- (ctrl->value <= 1))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- if ((ctrl->value >= V4L2_MPEG_VIDEO_ASPECT_1x1) &&
- (ctrl->value <= V4L2_MPEG_VIDEO_ASPECT_221x100))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- if ((ctrl->value >= 0) &&
- (ctrl->value <= 255))
- ret = 0;
- break;
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- if ((ctrl->value >= 1) &&
- (ctrl->value <= 3))
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int vidioc_try_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- int i, err = 0;
-
- if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- for (i = 0; i < ctrls->count; i++) {
- struct v4l2_ext_control *ctrl = ctrls->controls + i;
-
- err = saa7164_try_ctrl(ctrl, 0);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- }
- return err;
- }
-
- return -EINVAL;
-}
-
-static int saa7164_set_ctrl(struct saa7164_port *port,
- struct v4l2_ext_control *ctrl)
-{
- struct saa7164_vbi_params *params = &port->vbi_params;
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_MPEG_STREAM_TYPE:
- params->stream_type = ctrl->value;
- break;
- case V4L2_CID_MPEG_AUDIO_MUTE:
- params->ctl_mute = ctrl->value;
- ret = saa7164_api_audio_mute(port, params->ctl_mute);
- if (ret != SAA_OK) {
- printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
- ret);
- ret = -EIO;
- }
- break;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- params->ctl_aspect = ctrl->value;
- ret = saa7164_api_set_aspect_ratio(port);
- if (ret != SAA_OK) {
- printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
- ret);
- ret = -EIO;
- }
- break;
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- params->refdist = ctrl->value;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- params->gop_size = ctrl->value;
- break;
- default:
- return -EINVAL;
- }
-
- /* TODO: Update the hardware */
-
- return ret;
-}
-
-static int vidioc_s_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- int i, err = 0;
-
- if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- for (i = 0; i < ctrls->count; i++) {
- struct v4l2_ext_control *ctrl = ctrls->controls + i;
-
- err = saa7164_try_ctrl(ctrl, 0);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- err = saa7164_set_ctrl(port, ctrl);
- if (err) {
- ctrls->error_idx = i;
- break;
- }
- }
- return err;
-
- }
-
- return -EINVAL;
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -672,144 +230,6 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- if (f->index != 0)
- return -EINVAL;
-
- strlcpy(f->description, "VBI", sizeof(f->description));
- f->pixelformat = V4L2_PIX_FMT_MPEG;
-
- return 0;
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage =
- port->ts_packet_size * port->ts_packet_count;
- f->fmt.pix.colorspace = 0;
- f->fmt.pix.width = port->width;
- f->fmt.pix.height = port->height;
-
- dprintk(DBGLVL_VBI, "VIDIOC_G_FMT: w: %d, h: %d\n",
- port->width, port->height);
-
- return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage =
- port->ts_packet_size * port->ts_packet_count;
- f->fmt.pix.colorspace = 0;
- dprintk(DBGLVL_VBI, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
- port->width, port->height);
- return 0;
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct saa7164_vbi_fh *fh = file->private_data;
- struct saa7164_port *port = fh->port;
- struct saa7164_dev *dev = port->dev;
-
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage =
- port->ts_packet_size * port->ts_packet_count;
- f->fmt.pix.colorspace = 0;
-
- dprintk(DBGLVL_VBI, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
- f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
-
- return 0;
-}
-
-static int fill_queryctrl(struct saa7164_vbi_params *params,
- struct v4l2_queryctrl *c)
-{
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 127);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 66);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 62);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 128);
- case V4L2_CID_SHARPNESS:
- return v4l2_ctrl_query_fill(c, 0x0, 0x0f, 1, 8);
- case V4L2_CID_MPEG_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(c, 0x0, 0x01, 1, 0);
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(c, -83, 24, 1, 20);
- case V4L2_CID_MPEG_STREAM_TYPE:
- return v4l2_ctrl_query_fill(c,
- V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
- V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
- 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- return v4l2_ctrl_query_fill(c,
- V4L2_MPEG_VIDEO_ASPECT_1x1,
- V4L2_MPEG_VIDEO_ASPECT_221x100,
- 1, V4L2_MPEG_VIDEO_ASPECT_4x3);
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- return v4l2_ctrl_query_fill(c, 1, 255, 1, 15);
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- return v4l2_ctrl_query_fill(c,
- 1, 3, 1, 1);
- default:
- return -EINVAL;
- }
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- struct saa7164_vbi_fh *fh = priv;
- struct saa7164_port *port = fh->port;
- int i, next;
- u32 id = c->id;
-
- memset(c, 0, sizeof(*c));
-
- next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL);
- c->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL;
-
- for (i = 0; i < ARRAY_SIZE(saa7164_v4l2_ctrls); i++) {
- if (next) {
- if (c->id < saa7164_v4l2_ctrls[i])
- c->id = saa7164_v4l2_ctrls[i];
- else
- continue;
- }
-
- if (c->id == saa7164_v4l2_ctrls[i])
- return fill_queryctrl(&port->vbi_params, c);
-
- if (c->id < saa7164_v4l2_ctrls[i])
- break;
- }
-
- return -EINVAL;
-}
-
static int saa7164_vbi_stop_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
@@ -999,7 +419,6 @@ static int saa7164_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
/* ntsc */
- f->fmt.vbi.samples_per_line = 1600;
f->fmt.vbi.samples_per_line = 1440;
f->fmt.vbi.sampling_rate = 27000000;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
@@ -1009,6 +428,7 @@ static int saa7164_vbi_fmt(struct file *file, void *priv,
f->fmt.vbi.count[0] = 18;
f->fmt.vbi.start[1] = 263 + 10 + 1;
f->fmt.vbi.count[1] = 18;
+ memset(f->fmt.vbi.reserved, 0, sizeof(f->fmt.vbi.reserved));
return 0;
}
@@ -1031,8 +451,10 @@ static int fops_open(struct file *file)
if (NULL == fh)
return -ENOMEM;
- file->private_data = fh;
fh->port = port;
+ v4l2_fh_init(&fh->fh, video_devdata(file));
+ v4l2_fh_add(&fh->fh);
+ file->private_data = fh;
return 0;
}
@@ -1053,7 +475,8 @@ static int fops_release(struct file *file)
}
}
- file->private_data = NULL;
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
kfree(fh);
return 0;
@@ -1248,24 +671,14 @@ static const struct v4l2_file_operations vbi_fops = {
static const struct v4l2_ioctl_ops vbi_ioctl_ops = {
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
- .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_enum_input = saa7164_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_tuner = saa7164_g_tuner,
+ .vidioc_s_tuner = saa7164_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
- .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
- .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
- .vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_fmt_vbi_cap = saa7164_vbi_fmt,
.vidioc_try_fmt_vbi_cap = saa7164_vbi_fmt,
.vidioc_s_fmt_vbi_cap = saa7164_vbi_fmt,
@@ -1335,7 +748,7 @@ int saa7164_vbi_register(struct saa7164_port *port)
goto failed;
}
- port->std = V4L2_STD_NTSC_M;
+ port->enc_port = &dev->ports[port->nr - 2];
video_set_drvdata(port->v4l_device, port);
result = video_register_device(port->v4l_device,
VFL_TYPE_VBI, -1);
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index 18906e0c80e1..8337524bfb8c 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -54,8 +54,6 @@
#include <media/tuner.h>
#include <media/tveeprom.h>
-#include <media/videobuf-dma-sg.h>
-#include <media/videobuf-dvb.h>
#include <dvb_demux.h>
#include <dvb_frontend.h>
#include <dvb_net.h>
@@ -64,6 +62,8 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include "saa7164-reg.h"
#include "saa7164-types.h"
@@ -117,7 +117,11 @@
#define DBGLVL_CPU 8192
#define SAA7164_NORMS \
- (V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443)
+ (V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP)
+
+/* TV frequency range copied from tuner-core.c */
+#define SAA7164_TV_MIN_FREQ (44U * 16U)
+#define SAA7164_TV_MAX_FREQ (958U * 16U)
enum port_t {
SAA7164_MPEG_UNDEFINED = 0,
@@ -185,11 +189,13 @@ struct saa7164_subid {
};
struct saa7164_encoder_fh {
+ struct v4l2_fh fh;
struct saa7164_port *port;
atomic_t v4l_reading;
};
struct saa7164_vbi_fh {
+ struct v4l2_fh fh;
struct saa7164_port *port;
atomic_t v4l_reading;
};
@@ -381,12 +387,11 @@ struct saa7164_port {
/* Encoder */
/* Defaults established in saa7164-encoder.c */
struct saa7164_tvnorm encodernorm;
+ struct v4l2_ctrl_handler ctrl_handler;
v4l2_std_id std;
u32 height;
u32 width;
u32 freq;
- u32 ts_packet_size;
- u32 ts_packet_count;
u8 mux_input;
u8 encoder_profile;
u8 video_format;
@@ -419,6 +424,7 @@ struct saa7164_port {
/* V4L VBI */
struct tmComResVBIFormatDescrHeader vbi_fmt_ntsc;
struct saa7164_vbi_params vbi_params;
+ struct saa7164_port *enc_port;
/* Debug */
u32 sync_errors;
@@ -594,6 +600,16 @@ extern int saa7164_buffer_zero_offsets(struct saa7164_port *port, int i);
/* ----------------------------------------------------------- */
/* saa7164-encoder.c */
+int saa7164_s_std(struct saa7164_port *port, v4l2_std_id id);
+int saa7164_g_std(struct saa7164_port *port, v4l2_std_id *id);
+int saa7164_enum_input(struct file *file, void *priv, struct v4l2_input *i);
+int saa7164_g_input(struct saa7164_port *port, unsigned int *i);
+int saa7164_s_input(struct saa7164_port *port, unsigned int i);
+int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
+int saa7164_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t);
+int saa7164_g_frequency(struct saa7164_port *port, struct v4l2_frequency *f);
+int saa7164_s_frequency(struct saa7164_port *port,
+ const struct v4l2_frequency *f);
int saa7164_encoder_register(struct saa7164_port *port);
void saa7164_encoder_unregister(struct saa7164_port *port);
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 53fff5425c13..1bd2fd47421f 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -458,11 +458,12 @@ static inline u32 vop_usec(const vop_header *vh)
static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, const vop_header *vh)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int frame_size;
- vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
return -EIO;
@@ -470,7 +471,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
- return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
+ return solo_send_desc(solo_enc, solo_enc->jpeg_len, sgt,
vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
SOLO_JPEG_EXT_SIZE(solo_dev));
@@ -479,8 +480,9 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, const vop_header *vh)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int frame_off, frame_size;
int skip = 0;
@@ -488,15 +490,15 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
return -EIO;
/* If this is a key frame, add extra header */
- vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
+ vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
if (!vop_type(vh)) {
skip = solo_enc->vop_len;
- vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) +
solo_enc->vop_len);
} else {
- vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
}
@@ -505,7 +507,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
- return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
+ return solo_send_desc(solo_enc, skip, sgt, frame_off, frame_size,
SOLO_MP4E_EXT_ADDR(solo_dev),
SOLO_MP4E_EXT_SIZE(solo_dev));
}
@@ -513,6 +515,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, struct solo_enc_buf *enc_buf)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
const vop_header *vh = enc_buf->vh;
int ret;
@@ -527,17 +530,18 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
}
if (!ret) {
- vb->v4l2_buf.sequence = solo_enc->sequence++;
- vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh);
- vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh);
+ vbuf->sequence = solo_enc->sequence++;
+ vbuf->timestamp.tv_sec = vop_sec(vh);
+ vbuf->timestamp.tv_usec = vop_usec(vh);
/* Check for motion flags */
if (solo_is_motion_on(solo_enc) && enc_buf->motion) {
struct v4l2_event ev = {
.type = V4L2_EVENT_MOTION_DET,
.u.motion_det = {
- .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
- .frame_sequence = vb->v4l2_buf.sequence,
+ .flags
+ = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
+ .frame_sequence = vbuf->sequence,
.region_mask = enc_buf->motion ? 1 : 0,
},
};
@@ -571,7 +575,7 @@ static void solo_enc_handle_one(struct solo_enc_dev *solo_enc,
list_del(&vb->list);
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
- solo_enc_fillbuf(solo_enc, &vb->vb, enc_buf);
+ solo_enc_fillbuf(solo_enc, &vb->vb.vb2_buf, enc_buf);
unlock:
mutex_unlock(&solo_enc->lock);
}
@@ -659,7 +663,7 @@ static int solo_ring_thread(void *data)
}
static int solo_enc_queue_setup(struct vb2_queue *q,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *num_buffers,
unsigned int *num_planes, unsigned int sizes[],
void *alloc_ctxs[])
@@ -678,10 +682,11 @@ static int solo_enc_queue_setup(struct vb2_queue *q,
static void solo_enc_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vq);
struct solo_vb2_buf *solo_vb =
- container_of(vb, struct solo_vb2_buf, vb);
+ container_of(vbuf, struct solo_vb2_buf, vb);
spin_lock(&solo_enc->av_lock);
list_add_tail(&solo_vb->list, &solo_enc->vidq_active);
@@ -734,25 +739,26 @@ static void solo_enc_stop_streaming(struct vb2_queue *q)
struct solo_vb2_buf, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
}
static void solo_enc_buf_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
- struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
switch (solo_enc->fmt) {
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_H264:
- if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME)
- sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
+ sg_copy_from_buffer(sgt->sgl, sgt->nents,
solo_enc->vop, solo_enc->vop_len);
break;
default: /* V4L2_PIX_FMT_MJPEG */
- sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ sg_copy_from_buffer(sgt->sgl, sgt->nents,
solo_enc->jpeg_header, solo_enc->jpeg_len);
break;
}
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 63ae8a61f603..26df903585d7 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -26,6 +26,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "solo6x10.h"
@@ -191,13 +192,14 @@ static int solo_v4l2_set_ch(struct solo_dev *solo_dev, u8 ch)
static void solo_fillbuf(struct solo_dev *solo_dev,
struct vb2_buffer *vb)
{
- dma_addr_t vbuf;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ dma_addr_t addr;
unsigned int fdma_addr;
int error = -1;
int i;
- vbuf = vb2_dma_contig_plane_dma_addr(vb, 0);
- if (!vbuf)
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!addr)
goto finish_buf;
if (erase_off(solo_dev)) {
@@ -213,7 +215,7 @@ static void solo_fillbuf(struct solo_dev *solo_dev,
fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
(SOLO_HW_BPL * solo_vlines(solo_dev)));
- error = solo_p2m_dma_t(solo_dev, 0, vbuf, fdma_addr,
+ error = solo_p2m_dma_t(solo_dev, 0, addr, fdma_addr,
solo_bytesperline(solo_dev),
solo_vlines(solo_dev), SOLO_HW_BPL);
}
@@ -222,8 +224,8 @@ finish_buf:
if (!error) {
vb2_set_plane_payload(vb, 0,
solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
- vb->v4l2_buf.sequence = solo_dev->sequence++;
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ vbuf->sequence = solo_dev->sequence++;
+ v4l2_get_timestamp(&vbuf->timestamp);
}
vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
@@ -256,7 +258,7 @@ static void solo_thread_try(struct solo_dev *solo_dev)
spin_unlock(&solo_dev->slock);
- solo_fillbuf(solo_dev, &vb->vb);
+ solo_fillbuf(solo_dev, &vb->vb.vb2_buf);
}
assert_spin_locked(&solo_dev->slock);
@@ -311,7 +313,7 @@ static void solo_stop_thread(struct solo_dev *solo_dev)
solo_dev->kthread = NULL;
}
-static int solo_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int solo_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -345,10 +347,11 @@ static void solo_stop_streaming(struct vb2_queue *q)
static void solo_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct solo_dev *solo_dev = vb2_get_drv_priv(vq);
struct solo_vb2_buf *solo_vb =
- container_of(vb, struct solo_vb2_buf, vb);
+ container_of(vbuf, struct solo_vb2_buf, vb);
spin_lock(&solo_dev->slock);
list_add_tail(&solo_vb->list, &solo_dev->vidq_active);
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 27423d7f5410..4ab6586c0467 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -35,7 +35,7 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "solo6x10-regs.h"
@@ -135,7 +135,7 @@ struct solo_p2m_dev {
#define OSD_TEXT_MAX 44
struct solo_vb2_buf {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
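From here on, the changes follow the videobuf2 split of struct vb2_buffer into a core part plus a struct vb2_v4l2_buffer wrapper carrying the V4L2 metadata (flags, field, sequence, timestamp); the vb2_ops queue_setup callback also switches to a const void *parg argument in these hunks. Driver buffers embed the wrapper, queue callbacks still receive the core vb2_buffer and convert, and completion is reported on the embedded vb2_buf. A minimal sketch of the pattern (my_buffer and the helpers are illustrative names):

#include <linux/list.h>
#include <media/v4l2-common.h>
#include <media/videobuf2-v4l2.h>

struct my_buffer {
	struct vb2_v4l2_buffer vb;	/* wrapper: V4L2 metadata + embedded vb2_buf */
	struct list_head list;
};

static LIST_HEAD(my_active);		/* illustrative per-device active list */

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_buffer *buf = container_of(vbuf, struct my_buffer, vb);

	list_add_tail(&buf->list, &my_active);	/* hand the buffer to the driver */
}

static void my_buf_complete(struct my_buffer *buf, u32 sequence)
{
	v4l2_get_timestamp(&buf->vb.timestamp);	/* metadata lives in the wrapper */
	buf->vb.sequence = sequence;
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);	/* complete the core part */
}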
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 59b3a36a3639..6367b455a7e7 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -88,11 +88,11 @@
struct vip_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
dma_addr_t dma;
};
-static inline struct vip_buffer *to_vip_buffer(struct vb2_buffer *vb2)
+static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
{
return container_of(vb2, struct vip_buffer, vb);
}
@@ -265,7 +265,7 @@ static void vip_active_buf_next(struct sta2x11_vip *vip)
/* Videobuf2 Operations */
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -287,7 +287,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
};
static int buffer_init(struct vb2_buffer *vb)
{
- struct vip_buffer *vip_buf = to_vip_buffer(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
INIT_LIST_HEAD(&vip_buf->list);
@@ -296,8 +297,9 @@ static int buffer_init(struct vb2_buffer *vb)
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vb);
+ struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
unsigned long size;
size = vip->format.sizeimage;
@@ -307,14 +309,15 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
- vb2_set_plane_payload(&vip_buf->vb, 0, size);
+ vb2_set_plane_payload(&vip_buf->vb.vb2_buf, 0, size);
return 0;
}
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vb);
+ struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
spin_lock(&vip->lock);
list_add_tail(&vip_buf->list, &vip->buffer_list);
@@ -329,8 +332,9 @@ static void buffer_queue(struct vb2_buffer *vb)
}
static void buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vb);
+ struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
/* Buffer handled, remove it from the list */
spin_lock(&vip->lock);
@@ -370,7 +374,7 @@ static void stop_streaming(struct vb2_queue *vq)
/* Release all active buffers */
spin_lock(&vip->lock);
list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
- vb2_buffer_done(&vip_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&vip_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&vip_buf->list);
}
spin_unlock(&vip->lock);
@@ -813,9 +817,9 @@ static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
/* Disable acquisition */
reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
/* Remove the active buffer from the list */
- v4l2_get_timestamp(&vip->active->vb.v4l2_buf.timestamp);
- vip->active->vb.v4l2_buf.sequence = vip->sequence++;
- vb2_buffer_done(&vip->active->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vip->active->vb.timestamp);
+ vip->active->vb.sequence = vip->sequence++;
+ vb2_buffer_done(&vip->active->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
return IRQ_HANDLED;
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 3f24fce74fc1..f89364951ebd 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -303,7 +303,6 @@ static int arm_thread(void *data)
static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
u8 *buffer2, size_t buffer2_len,
struct dvb_demux_filter *dvbdmxfilter,
- enum dmx_success success,
struct av7110 *av7110)
{
if (!dvbdmxfilter->feed->demux->dmx.frontend)
@@ -329,16 +328,14 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
}
return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
buffer2, buffer2_len,
- &dvbdmxfilter->filter,
- DMX_OK);
+ &dvbdmxfilter->filter);
case DMX_TYPE_TS:
if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))
return 0;
if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)
return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
buffer2, buffer2_len,
- &dvbdmxfilter->feed->feed.ts,
- DMX_OK);
+ &dvbdmxfilter->feed->feed.ts);
else
av7110_p2t_write(buffer1, buffer1_len,
dvbdmxfilter->feed->pid,
@@ -422,7 +419,7 @@ static void debiirq(unsigned long cookie)
DvbDmxFilterCallback((u8 *)av7110->debi_virt,
av7110->debilen, NULL, 0,
av7110->handle2filter[handle],
- DMX_OK, av7110);
+ av7110);
xfer = RX_BUFF;
break;
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 9544cfc06601..9ed1ec7d3551 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -102,7 +102,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
buf[4] = buf[5] = 0;
if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)
return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
- &dvbdmxfeed->feed.ts, DMX_OK);
+ &dvbdmxfeed->feed.ts);
else
return dvb_filter_pes2ts(p2t, buf, len, 1);
}
@@ -112,7 +112,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)
struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv;
dvbdmxfeed->cb.ts(data, 188, NULL, 0,
- &dvbdmxfeed->feed.ts, DMX_OK);
+ &dvbdmxfeed->feed.ts);
return 0;
}
@@ -815,7 +815,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
memcpy(obuf + l, buf + c, TS_SIZE - l);
c = length;
}
- feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, DMX_OK);
+ feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts);
pes_start = 0;
}
}
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 8355e55b4e8e..4c3293dcddbc 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -376,10 +376,11 @@ static int tw68_buffer_count(unsigned int size, unsigned int count)
/* ------------------------------------------------------------- */
/* vb2 queue operations */
-static int tw68_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int tw68_queue_setup(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct tw68_dev *dev = vb2_get_drv_priv(q);
unsigned tot_bufs = q->num_buffers + *num_buffers;
@@ -423,9 +424,10 @@ static int tw68_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
*/
static void tw68_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct tw68_dev *dev = vb2_get_drv_priv(vq);
- struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+ struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
struct tw68_buf *prev;
unsigned long flags;
@@ -457,9 +459,10 @@ static void tw68_buf_queue(struct vb2_buffer *vb)
*/
static int tw68_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct tw68_dev *dev = vb2_get_drv_priv(vq);
- struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+ struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
unsigned size, bpl;
@@ -499,9 +502,10 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
static void tw68_buf_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct tw68_dev *dev = vb2_get_drv_priv(vq);
- struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+ struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
}
@@ -528,7 +532,7 @@ static void tw68_stop_streaming(struct vb2_queue *q)
container_of(dev->active.next, struct tw68_buf, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
}
@@ -1012,10 +1016,10 @@ void tw68_irq_video_done(struct tw68_dev *dev, unsigned long status)
buf = list_entry(dev->active.next, struct tw68_buf, list);
list_del(&buf->list);
spin_unlock(&dev->slock);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.field = dev->field;
- buf->vb.v4l2_buf.sequence = dev->seqnr++;
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.field = dev->field;
+ buf->vb.sequence = dev->seqnr++;
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
status &= ~(TW68_DMAPI);
if (0 == status)
return;
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index ef51e4d48866..6c7dcb300f34 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -36,6 +36,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
#include "tw68-reg.h"
@@ -118,7 +119,7 @@ struct tw68_dev; /* forward delclaration */
/* buffer for one video/vbi/ts frame */
struct tw68_buf {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
unsigned int size;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index dc75694ac12d..ccbc9742cb7a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -233,7 +233,7 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_JPU
tristate "Renesas JPEG Processing Unit"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on ARCH_SHMOBILE || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index c8447fa3fd91..f0480d687f17 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -307,7 +307,8 @@ static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
return container_of(ccdc, struct vpfe_device, ccdc);
}
-static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_buffer *vb)
+static inline
+struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct vpfe_cap_buffer, vb);
}
@@ -1257,14 +1258,14 @@ static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
list_del(&vpfe->next_frm->list);
vpfe_set_sdr_addr(&vpfe->ccdc,
- vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0));
+ vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
}
static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
unsigned long addr;
- addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0) +
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
vpfe->field_off;
vpfe_set_sdr_addr(&vpfe->ccdc, addr);
@@ -1280,10 +1281,10 @@ static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
*/
static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
{
- v4l2_get_timestamp(&vpfe->cur_frm->vb.v4l2_buf.timestamp);
- vpfe->cur_frm->vb.v4l2_buf.field = vpfe->fmt.fmt.pix.field;
- vpfe->cur_frm->vb.v4l2_buf.sequence = vpfe->sequence++;
- vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vpfe->cur_frm->vb.timestamp);
+ vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
+ vpfe->cur_frm->vb.sequence = vpfe->sequence++;
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
vpfe->cur_frm = vpfe->next_frm;
}
@@ -1907,10 +1908,11 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
* the buffer count and buffer size
*/
static int vpfe_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
if (fmt && fmt->fmt.pix.sizeimage < vpfe->fmt.fmt.pix.sizeimage)
@@ -1942,6 +1944,7 @@ static int vpfe_queue_setup(struct vb2_queue *vq,
*/
static int vpfe_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
@@ -1949,7 +1952,7 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
return -EINVAL;
- vb->v4l2_buf.field = vpfe->fmt.fmt.pix.field;
+ vbuf->field = vpfe->fmt.fmt.pix.field;
return 0;
}
@@ -1960,8 +1963,9 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
*/
static void vpfe_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_cap_buffer *buf = to_vpfe_buffer(vb);
+ struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
unsigned long flags = 0;
/* add the buffer to the DMA queue */
@@ -2006,7 +2010,7 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
list_del(&vpfe->cur_frm->list);
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
- addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
@@ -2023,7 +2027,7 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
err:
list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
return ret;
@@ -2055,13 +2059,14 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
if (vpfe->cur_frm == vpfe->next_frm) {
- vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
} else {
if (vpfe->cur_frm != NULL)
- vb2_buffer_done(&vpfe->cur_frm->vb,
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
if (vpfe->next_frm != NULL)
- vb2_buffer_done(&vpfe->next_frm->vb,
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -2069,7 +2074,8 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
vpfe->next_frm = list_entry(vpfe->dma_queue.next,
struct vpfe_cap_buffer, list);
list_del(&vpfe->next_frm->list);
- vb2_buffer_done(&vpfe->next_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
@@ -2546,11 +2552,12 @@ static int vpfe_probe(struct platform_device *pdev)
if (IS_ERR(ccdc->ccdc_cfg.base_addr))
return PTR_ERR(ccdc->ccdc_cfg.base_addr);
- vpfe->irq = platform_get_irq(pdev, 0);
- if (vpfe->irq <= 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
return -ENODEV;
}
+ vpfe->irq = ret;
ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
"vpfe_capture0", vpfe);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 5bfb35649a39..777bf97fea57 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -31,6 +31,7 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "am437x-vpfe_regs.h"
@@ -104,7 +105,7 @@ struct vpfe_config {
};
struct vpfe_cap_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index b7e70fb05eb8..7764b9c482ef 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -54,7 +54,7 @@ struct bcap_format {
};
struct bcap_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -149,7 +149,7 @@ static const struct bcap_format bcap_formats[] = {
static irqreturn_t bcap_isr(int irq, void *dev_id);
-static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb)
+static struct bcap_buffer *to_bcap_vb(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct bcap_buffer, vb);
}
@@ -202,10 +202,11 @@ static void bcap_free_sensor_formats(struct bcap_device *bcap_dev)
}
static int bcap_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
if (fmt && fmt->fmt.pix.sizeimage < bcap_dev->fmt.sizeimage)
@@ -223,6 +224,7 @@ static int bcap_queue_setup(struct vb2_queue *vq,
static int bcap_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size = bcap_dev->fmt.sizeimage;
@@ -233,15 +235,16 @@ static int bcap_buffer_prepare(struct vb2_buffer *vb)
}
vb2_set_plane_payload(vb, 0, size);
- vb->v4l2_buf.field = bcap_dev->fmt.field;
+ vbuf->field = bcap_dev->fmt.field;
return 0;
}
static void bcap_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
- struct bcap_buffer *buf = to_bcap_vb(vb);
+ struct bcap_buffer *buf = to_bcap_vb(vbuf);
unsigned long flags;
spin_lock_irqsave(&bcap_dev->lock, flags);
@@ -251,8 +254,9 @@ static void bcap_buffer_queue(struct vb2_buffer *vb)
static void bcap_buffer_cleanup(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
- struct bcap_buffer *buf = to_bcap_vb(vb);
+ struct bcap_buffer *buf = to_bcap_vb(vbuf);
unsigned long flags;
spin_lock_irqsave(&bcap_dev->lock, flags);
@@ -333,7 +337,8 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
struct bcap_buffer, list);
/* remove buffer from the dma queue */
list_del_init(&bcap_dev->cur_frm->list);
- addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb.vb2_buf,
+ 0);
/* update DMA address */
ppi->ops->update_addr(ppi, (unsigned long)addr);
/* enable ppi */
@@ -344,7 +349,7 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
err:
list_for_each_entry_safe(buf, tmp, &bcap_dev->dma_queue, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
return ret;
@@ -367,13 +372,15 @@ static void bcap_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
if (bcap_dev->cur_frm)
- vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&bcap_dev->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
while (!list_empty(&bcap_dev->dma_queue)) {
bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
struct bcap_buffer, list);
list_del_init(&bcap_dev->cur_frm->list);
- vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&bcap_dev->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
}
}
@@ -392,18 +399,19 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
{
struct ppi_if *ppi = dev_id;
struct bcap_device *bcap_dev = ppi->priv;
- struct vb2_buffer *vb = &bcap_dev->cur_frm->vb;
+ struct vb2_v4l2_buffer *vbuf = &bcap_dev->cur_frm->vb;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
dma_addr_t addr;
spin_lock(&bcap_dev->lock);
if (!list_empty(&bcap_dev->dma_queue)) {
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vbuf->timestamp);
if (ppi->err) {
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
ppi->err = false;
} else {
- vb->v4l2_buf.sequence = bcap_dev->sequence++;
+ vbuf->sequence = bcap_dev->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
@@ -420,7 +428,8 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
if (bcap_dev->stop) {
complete(&bcap_dev->comp);
} else {
- addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(
+ &bcap_dev->cur_frm->vb.vb2_buf, 0);
ppi->ops->update_addr(ppi, (unsigned long)addr);
ppi->ops->start(ppi);
}
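
On the completion side (bcap_isr above, and the vpfe/vpif interrupt paths elsewhere in this series) the per-frame metadata moves onto the vb2_v4l2_buffer while the embedded vb2_buffer is what gets handed back to the core. A hedged sketch of that interrupt-path idiom, reusing the hypothetical my_cap_buffer type from the earlier sketch:

static void my_complete_frame(struct my_cap_buffer *buf, unsigned int *sequence)
{
	struct vb2_v4l2_buffer *vbuf = &buf->vb;

	v4l2_get_timestamp(&vbuf->timestamp);	/* struct timeval timestamp */
	vbuf->sequence = (*sequence)++;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}

The same idiom explains the fimc hunks further down, which drop the open-coded ktime_get_ts()/timeval conversion in favour of v4l2_get_timestamp() on the new field.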
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index fd7819d8922d..654e964f84a2 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -25,7 +25,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
@@ -179,31 +179,32 @@ static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
}
static int coda_bitstream_queue(struct coda_ctx *ctx,
- struct vb2_buffer *src_buf)
+ struct vb2_v4l2_buffer *src_buf)
{
- u32 src_size = vb2_get_plane_payload(src_buf, 0);
+ u32 src_size = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
u32 n;
- n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0),
- src_size);
+ n = kfifo_in(&ctx->bitstream_fifo,
+ vb2_plane_vaddr(&src_buf->vb2_buf, 0), src_size);
if (n < src_size)
return -ENOSPC;
- src_buf->v4l2_buf.sequence = ctx->qsequence++;
+ src_buf->sequence = ctx->qsequence++;
return 0;
}
static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
- struct vb2_buffer *src_buf)
+ struct vb2_v4l2_buffer *src_buf)
{
int ret;
if (coda_get_bitstream_payload(ctx) +
- vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
+ vb2_get_plane_payload(&src_buf->vb2_buf, 0) + 512 >=
+ ctx->bitstream.size)
return false;
- if (vb2_plane_vaddr(src_buf, 0) == NULL) {
+ if (vb2_plane_vaddr(&src_buf->vb2_buf, 0) == NULL) {
v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
return true;
}
@@ -224,7 +225,7 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
{
- struct vb2_buffer *src_buf;
+ struct vb2_v4l2_buffer *src_buf;
struct coda_buffer_meta *meta;
unsigned long flags;
u32 start;
@@ -257,7 +258,7 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
}
/* Dump empty buffers */
- if (!vb2_get_plane_payload(src_buf, 0)) {
+ if (!vb2_get_plane_payload(&src_buf->vb2_buf, 0)) {
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
continue;
@@ -276,9 +277,9 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (meta) {
- meta->sequence = src_buf->v4l2_buf.sequence;
- meta->timecode = src_buf->v4l2_buf.timecode;
- meta->timestamp = src_buf->v4l2_buf.timestamp;
+ meta->sequence = src_buf->sequence;
+ meta->timecode = src_buf->timecode;
+ meta->timestamp = src_buf->timestamp;
meta->start = start;
meta->end = ctx->bitstream_fifo.kfifo.in &
ctx->bitstream_fifo.kfifo.mask;
@@ -483,20 +484,21 @@ err:
return ret;
}
-static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
+static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
int header_code, u8 *header, int *size)
{
+ struct vb2_buffer *vb = &buf->vb2_buf;
struct coda_dev *dev = ctx->dev;
size_t bufsize;
int ret;
int i;
if (dev->devtype->product == CODA_960)
- memset(vb2_plane_vaddr(buf, 0), 0, 64);
+ memset(vb2_plane_vaddr(vb, 0), 0, 64);
- coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0),
+ coda_write(dev, vb2_dma_contig_plane_dma_addr(vb, 0),
CODA_CMD_ENC_HEADER_BB_START);
- bufsize = vb2_plane_size(buf, 0);
+ bufsize = vb2_plane_size(vb, 0);
if (dev->devtype->product == CODA_960)
bufsize /= 1024;
coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
@@ -509,14 +511,14 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
if (dev->devtype->product == CODA_960) {
for (i = 63; i > 0; i--)
- if (((char *)vb2_plane_vaddr(buf, 0))[i] != 0)
+ if (((char *)vb2_plane_vaddr(vb, 0))[i] != 0)
break;
*size = i + 1;
} else {
*size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
}
- memcpy(header, vb2_plane_vaddr(buf, 0), *size);
+ memcpy(header, vb2_plane_vaddr(vb, 0), *size);
return 0;
}
@@ -799,7 +801,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
struct coda_q_data *q_data_src, *q_data_dst;
u32 bitstream_buf, bitstream_size;
- struct vb2_buffer *buf;
+ struct vb2_v4l2_buffer *buf;
int gamma, ret, value;
u32 dst_fourcc;
int num_fb;
@@ -810,7 +812,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
dst_fourcc = q_data_dst->fourcc;
buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
+ bitstream_buf = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
bitstream_size = q_data_dst->sizeimage;
if (!coda_is_initialized(dev)) {
@@ -1185,7 +1187,7 @@ out:
static int coda_prepare_encode(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
int force_ipicture;
int quant_param = 0;
@@ -1200,8 +1202,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_fourcc = q_data_dst->fourcc;
- src_buf->v4l2_buf.sequence = ctx->osequence;
- dst_buf->v4l2_buf.sequence = ctx->osequence;
+ src_buf->sequence = ctx->osequence;
+ dst_buf->sequence = ctx->osequence;
ctx->osequence++;
/*
@@ -1209,12 +1211,12 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
* frame as IDR. This is a problem for some decoders that can't
* recover when a frame is lost.
*/
- if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) {
- src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
- src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ if (src_buf->sequence % ctx->params.gop_size) {
+ src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
} else {
- src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
}
if (dev->devtype->product == CODA_960)
@@ -1224,9 +1226,9 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
* Copy headers at the beginning of the first frame for H.264 only.
* In MPEG4 they are already copied by the coda.
*/
- if (src_buf->v4l2_buf.sequence == 0) {
+ if (src_buf->sequence == 0) {
pic_stream_buffer_addr =
- vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2];
@@ -1234,20 +1236,21 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
ctx->vpu_header_size[0] -
ctx->vpu_header_size[1] -
ctx->vpu_header_size[2];
- memcpy(vb2_plane_vaddr(dst_buf, 0),
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
&ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
- memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0],
- &ctx->vpu_header[1][0], ctx->vpu_header_size[1]);
- memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] +
- ctx->vpu_header_size[1], &ctx->vpu_header[2][0],
- ctx->vpu_header_size[2]);
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+ + ctx->vpu_header_size[0], &ctx->vpu_header[1][0],
+ ctx->vpu_header_size[1]);
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+ + ctx->vpu_header_size[0] + ctx->vpu_header_size[1],
+ &ctx->vpu_header[2][0], ctx->vpu_header_size[2]);
} else {
pic_stream_buffer_addr =
- vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
pic_stream_buffer_size = q_data_dst->sizeimage;
}
- if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+ if (src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
force_ipicture = 1;
switch (dst_fourcc) {
case V4L2_PIX_FMT_H264:
@@ -1324,7 +1327,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
static void coda_finish_encode(struct coda_ctx *ctx)
{
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
u32 wr_ptr, start_ptr;
@@ -1338,13 +1341,13 @@ static void coda_finish_encode(struct coda_ctx *ctx)
wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
/* Calculate bytesused field */
- if (dst_buf->v4l2_buf.sequence == 0) {
- vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
+ if (dst_buf->sequence == 0) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2]);
} else {
- vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
}
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
@@ -1354,18 +1357,18 @@ static void coda_finish_encode(struct coda_ctx *ctx)
coda_read(dev, CODA_RET_ENC_PIC_FLAG);
if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
- dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
} else {
- dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
}
- dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.flags |=
- src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->flags |=
+ src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->timecode = src_buf->timecode;
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
@@ -1378,8 +1381,8 @@ static void coda_finish_encode(struct coda_ctx *ctx)
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
"job finished: encoding frame (%d) (%s)\n",
- dst_buf->v4l2_buf.sequence,
- (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
"KEYFRAME" : "PFRAME");
}
@@ -1716,7 +1719,7 @@ static int coda_start_decoding(struct coda_ctx *ctx)
static int coda_prepare_decode(struct coda_ctx *ctx)
{
- struct vb2_buffer *dst_buf;
+ struct vb2_v4l2_buffer *dst_buf;
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
struct coda_buffer_meta *meta;
@@ -1763,7 +1766,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
* well as the rotator buffer output.
* ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames.
*/
- coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index,
+ coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
CODA9_CMD_DEC_PIC_ROT_INDEX);
reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
@@ -1838,7 +1841,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_src;
struct coda_q_data *q_data_dst;
- struct vb2_buffer *dst_buf;
+ struct vb2_v4l2_buffer *dst_buf;
struct coda_buffer_meta *meta;
unsigned long payload;
unsigned long flags;
@@ -2029,15 +2032,15 @@ static void coda_finish_decode(struct coda_ctx *ctx)
if (ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- dst_buf->v4l2_buf.sequence = ctx->osequence++;
+ dst_buf->sequence = ctx->osequence++;
- dst_buf->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
+ dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
- dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx];
+ dst_buf->flags |= ctx->frame_types[ctx->display_idx];
meta = &ctx->frame_metas[ctx->display_idx];
- dst_buf->v4l2_buf.timecode = meta->timecode;
- dst_buf->v4l2_buf.timestamp = meta->timestamp;
+ dst_buf->timecode = meta->timecode;
+ dst_buf->timestamp = meta->timestamp;
trace_coda_dec_rot_done(ctx, dst_buf, meta);
@@ -2052,15 +2055,15 @@ static void coda_finish_decode(struct coda_ctx *ctx)
payload = width * height * 2;
break;
}
- vb2_set_plane_payload(dst_buf, 0, payload);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
coda_m2m_buf_done(ctx, dst_buf, ctx->frame_errors[display_idx] ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
"job finished: decoding frame (%d) (%s)\n",
- dst_buf->v4l2_buf.sequence,
- (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
"KEYFRAME" : "PFRAME");
} else {
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index a4654e0c104d..15516a6e3a39 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -36,7 +36,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
@@ -84,9 +84,9 @@ unsigned int coda_read(struct coda_dev *dev, u32 reg)
}
void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
- struct vb2_buffer *buf, unsigned int reg_y)
+ struct vb2_v4l2_buffer *buf, unsigned int reg_y)
{
- u32 base_y = vb2_dma_contig_plane_dma_addr(buf, 0);
+ u32 base_y = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
u32 base_cb, base_cr;
switch (q_data->fourcc) {
@@ -684,17 +684,17 @@ static int coda_qbuf(struct file *file, void *priv,
}
static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
- struct vb2_buffer *buf)
+ struct vb2_v4l2_buffer *buf)
{
struct vb2_queue *src_vq;
src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
- (buf->v4l2_buf.sequence == (ctx->qsequence - 1)));
+ (buf->sequence == (ctx->qsequence - 1)));
}
-void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf,
+void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
enum vb2_buffer_state state)
{
const struct v4l2_event eos_event = {
@@ -702,7 +702,7 @@ void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf,
};
if (coda_buf_is_end_of_stream(ctx, buf)) {
- buf->v4l2_buf.flags |= V4L2_BUF_FLAG_LAST;
+ buf->flags |= V4L2_BUF_FLAG_LAST;
v4l2_event_queue_fh(&ctx->fh, &eos_event);
}
@@ -1131,8 +1131,7 @@ static void set_default_params(struct coda_ctx *ctx)
/*
* Queue operations
*/
-static int coda_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+static int coda_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -1175,6 +1174,7 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
static void coda_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_queue *vq = vb->vb2_queue;
struct coda_q_data *q_data;
@@ -1193,12 +1193,12 @@ static void coda_buf_queue(struct vb2_buffer *vb)
if (vb2_get_plane_payload(vb, 0) == 0)
coda_bit_stream_end_flag(ctx);
mutex_lock(&ctx->bitstream_mutex);
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
if (vb2_is_streaming(vb->vb2_queue))
coda_fill_bitstream(ctx, true);
mutex_unlock(&ctx->bitstream_mutex);
} else {
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
}
@@ -1247,7 +1247,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
struct coda_q_data *q_data_src, *q_data_dst;
- struct vb2_buffer *buf;
+ struct vb2_v4l2_buffer *buf;
int ret = 0;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
@@ -1338,7 +1338,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
{
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct coda_dev *dev = ctx->dev;
- struct vb2_buffer *buf;
+ struct vb2_v4l2_buffer *buf;
unsigned long flags;
bool stop;
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index 11e734bc2cbd..96cd42a0baaf 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -178,12 +178,12 @@ int coda_jpeg_write_tables(struct coda_ctx *ctx)
return 0;
}
-bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb)
{
- void *vaddr = vb2_plane_vaddr(vb, 0);
+ void *vaddr = vb2_plane_vaddr(&vb->vb2_buf, 0);
u16 soi = be16_to_cpup((__be16 *)vaddr);
u16 eoi = be16_to_cpup((__be16 *)(vaddr +
- vb2_get_plane_payload(vb, 0) - 2));
+ vb2_get_plane_payload(&vb->vb2_buf, 0) - 2));
return soi == SOI_MARKER && eoi == EOI_MARKER;
}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 59b2af9c7749..96532b06bd9e 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -24,7 +24,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "coda_regs.h"
@@ -243,7 +243,7 @@ extern int coda_debug;
void coda_write(struct coda_dev *dev, u32 data, u32 reg);
unsigned int coda_read(struct coda_dev *dev, u32 reg);
void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
- struct vb2_buffer *buf, unsigned int reg_y);
+ struct vb2_v4l2_buffer *buf, unsigned int reg_y);
int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
size_t size, const char *name, struct dentry *parent);
@@ -284,12 +284,12 @@ static inline unsigned int coda_get_bitstream_payload(struct coda_ctx *ctx)
void coda_bit_stream_end_flag(struct coda_ctx *ctx);
-void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf,
+void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
enum vb2_buffer_state state);
int coda_h264_padding(int size, char *p);
-bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb);
int coda_jpeg_write_tables(struct coda_ctx *ctx);
void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index d9099a0f7c32..f20666a4aa89 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -5,7 +5,7 @@
#define __CODA_TRACE_H__
#include <linux/tracepoint.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "coda.h"
@@ -49,7 +49,7 @@ TRACE_EVENT(coda_bit_done,
);
DECLARE_EVENT_CLASS(coda_buf_class,
- TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
TP_ARGS(ctx, buf),
@@ -61,7 +61,7 @@ DECLARE_EVENT_CLASS(coda_buf_class,
TP_fast_assign(
__entry->minor = ctx->fh.vdev->minor;
- __entry->index = buf->v4l2_buf.index;
+ __entry->index = buf->vb2_buf.index;
__entry->ctx = ctx->idx;
),
@@ -70,17 +70,17 @@ DECLARE_EVENT_CLASS(coda_buf_class,
);
DEFINE_EVENT(coda_buf_class, coda_enc_pic_run,
- TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
TP_ARGS(ctx, buf)
);
DEFINE_EVENT(coda_buf_class, coda_enc_pic_done,
- TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
TP_ARGS(ctx, buf)
);
DECLARE_EVENT_CLASS(coda_buf_meta_class,
- TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
struct coda_buffer_meta *meta),
TP_ARGS(ctx, buf, meta),
@@ -95,7 +95,7 @@ DECLARE_EVENT_CLASS(coda_buf_meta_class,
TP_fast_assign(
__entry->minor = ctx->fh.vdev->minor;
- __entry->index = buf->v4l2_buf.index;
+ __entry->index = buf->vb2_buf.index;
__entry->start = meta->start;
__entry->end = meta->end;
__entry->ctx = ctx->idx;
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(coda_buf_meta_class,
);
DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue,
- TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
struct coda_buffer_meta *meta),
TP_ARGS(ctx, buf, meta)
);
@@ -146,7 +146,7 @@ DEFINE_EVENT(coda_meta_class, coda_dec_pic_done,
);
DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
- TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
struct coda_buffer_meta *meta),
TP_ARGS(ctx, buf, meta)
);
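
For the mem2mem drivers (coda above, gsc and fimc-m2m below), the source and destination buffers handed around by the v4l2-mem2mem helpers are now the vb2_v4l2_buffer, so timestamp, timecode and the timestamp-source flags are copied vbuf-to-vbuf while payload and DMA-address helpers still operate on the embedded vb2_buf. A small sketch of that hand-off, under the same assumptions and hypothetical names as the earlier sketches:

static void my_m2m_copy_meta(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(m2m_ctx);
	struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(m2m_ctx);

	dst->timestamp = src->timestamp;
	dst->timecode = src->timecode;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	/* shown only to illustrate which struct the plane helpers take;
	 * a real encoder/decoder computes the destination payload itself */
	vb2_set_plane_payload(&dst->vb2_buf, 0,
			      vb2_get_plane_payload(&src->vb2_buf, 0));
}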
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index f69cdd7da10c..6d91422c4e4c 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -74,8 +74,8 @@ static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
if (layer->cur_frm == layer->next_frm)
return;
- v4l2_get_timestamp(&layer->cur_frm->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&layer->cur_frm->vb.timestamp);
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
layer->cur_frm = layer->next_frm;
}
@@ -104,8 +104,8 @@ static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
list_del(&layer->next_frm->list);
spin_unlock(&disp_obj->dma_queue_lock);
/* Mark state of the frame to active */
- layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
- addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0);
+ layer->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb.vb2_buf, 0);
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
addr,
@@ -228,11 +228,12 @@ static int vpbe_buffer_prepare(struct vb2_buffer *vb)
* This function allocates memory for the buffers
*/
static int
-vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+vpbe_buffer_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
/* Get the file handle object and layer object */
struct vpbe_layer *layer = vb2_get_drv_priv(vq);
struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
@@ -259,8 +260,9 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
*/
static void vpbe_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
/* Get the file handle object and layer object */
- struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer *buf = container_of(vbuf,
struct vpbe_disp_buffer, vb);
struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
struct vpbe_display *disp = layer->disp_dev;
@@ -290,7 +292,7 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Remove buffer from the buffer queue */
list_del(&layer->cur_frm->list);
/* Mark state of the current frame to active */
- layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ layer->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
/* Initialize field_id and started member */
layer->field_id = 0;
@@ -299,10 +301,12 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0) {
struct vpbe_disp_buffer *buf, *tmp;
- vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
return ret;
@@ -332,13 +336,14 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
spin_lock_irqsave(&disp->dma_queue_lock, flags);
if (layer->cur_frm == layer->next_frm) {
- vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
} else {
if (layer->cur_frm != NULL)
- vb2_buffer_done(&layer->cur_frm->vb,
+ vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
if (layer->next_frm != NULL)
- vb2_buffer_done(&layer->next_frm->vb,
+ vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -346,7 +351,8 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
layer->next_frm = list_entry(layer->dma_queue.next,
struct vpbe_disp_buffer, list);
list_del(&layer->next_frm->list);
- vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
}
@@ -383,7 +389,7 @@ static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
unsigned long addr;
int ret;
- addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb.vb2_buf, 0);
/* Set address in the display registers */
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a5f548138b91..c1e573b7cc6f 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -57,7 +57,8 @@ static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} };
/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
static int ycmux_mode;
-static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb)
+static inline
+struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct vpif_cap_buffer, vb);
}
@@ -72,6 +73,7 @@ static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb)
*/
static int vpif_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *q = vb->vb2_queue;
struct channel_obj *ch = vb2_get_drv_priv(q);
struct common_obj *common;
@@ -85,7 +87,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
return -EINVAL;
- vb->v4l2_buf.field = common->fmt.fmt.pix.field;
+ vbuf->field = common->fmt.fmt.pix.field;
addr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
@@ -112,10 +114,11 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* the buffer count and buffer size
*/
static int vpif_buffer_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct channel_obj *ch = vb2_get_drv_priv(vq);
struct common_obj *common;
@@ -145,8 +148,9 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
*/
static void vpif_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
- struct vpif_cap_buffer *buf = to_vpif_buffer(vb);
+ struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf);
struct common_obj *common;
unsigned long flags;
@@ -214,7 +218,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
list_del(&common->cur_frm->list);
spin_unlock_irqrestore(&common->irqlock, flags);
- addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
common->set_addr(addr + common->ytop_off,
addr + common->ybtm_off,
@@ -243,7 +247,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
err:
list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
spin_unlock_irqrestore(&common->irqlock, flags);
@@ -286,13 +290,14 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
spin_lock_irqsave(&common->irqlock, flags);
if (common->cur_frm == common->next_frm) {
- vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
} else {
if (common->cur_frm != NULL)
- vb2_buffer_done(&common->cur_frm->vb,
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
if (common->next_frm != NULL)
- vb2_buffer_done(&common->next_frm->vb,
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -300,7 +305,8 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
list_del(&common->next_frm->list);
- vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&common->irqlock, flags);
}
@@ -325,9 +331,8 @@ static struct vb2_ops video_qops = {
*/
static void vpif_process_buffer_complete(struct common_obj *common)
{
- v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&common->cur_frm->vb,
- VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&common->cur_frm->vb.timestamp);
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
/* Make curFrm pointing to nextFrm */
common->cur_frm = common->next_frm;
}
@@ -350,7 +355,7 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
spin_unlock(&common->irqlock);
- addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
/* Set top and bottom field addresses in VPIF registers */
common->set_addr(addr + common->ytop_off,
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index 8b8a663f6b22..4a7600929b61 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -52,7 +52,7 @@ struct video_obj {
};
struct vpif_cap_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
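
The queue_setup() changes in these hunks are a second, independent part of the conversion: the optional format hint is now passed as an opaque const void *parg and cast back inside the callback. A minimal sketch of that prototype, with a hypothetical my_dev/sizeimage used only for illustration:

struct my_dev {
	unsigned int sizeimage;		/* current format's image size */
};

static int my_queue_setup(struct vb2_queue *vq, const void *parg,
			  unsigned int *nbuffers, unsigned int *nplanes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	const struct v4l2_format *fmt = parg;	/* may be NULL */
	struct my_dev *dev = vb2_get_drv_priv(vq);

	if (fmt && fmt->fmt.pix.sizeimage < dev->sizeimage)
		return -EINVAL;

	*nplanes = 1;
	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : dev->sizeimage;
	return 0;
}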
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 682e5d578bf7..fd2780306c17 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -53,7 +53,8 @@ static struct device *vpif_dev;
static void vpif_calculate_offsets(struct channel_obj *ch);
static void vpif_config_addr(struct channel_obj *ch, int muxmode);
-static inline struct vpif_disp_buffer *to_vpif_buffer(struct vb2_buffer *vb)
+static inline
+struct vpif_disp_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct vpif_disp_buffer, vb);
}
@@ -68,6 +69,7 @@ static inline struct vpif_disp_buffer *to_vpif_buffer(struct vb2_buffer *vb)
*/
static int vpif_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
struct common_obj *common;
@@ -77,7 +79,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
return -EINVAL;
- vb->v4l2_buf.field = common->fmt.fmt.pix.field;
+ vbuf->field = common->fmt.fmt.pix.field;
if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0);
@@ -107,10 +109,11 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* the buffer count and buffer size
*/
static int vpif_buffer_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct channel_obj *ch = vb2_get_drv_priv(vq);
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
@@ -138,7 +141,8 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
*/
static void vpif_buffer_queue(struct vb2_buffer *vb)
{
- struct vpif_disp_buffer *buf = to_vpif_buffer(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpif_disp_buffer *buf = to_vpif_buffer(vbuf);
struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
struct common_obj *common;
unsigned long flags;
@@ -197,7 +201,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
list_del(&common->cur_frm->list);
spin_unlock_irqrestore(&common->irqlock, flags);
- addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
common->set_addr((addr + common->ytop_off),
(addr + common->ybtm_off),
(addr + common->ctop_off),
@@ -229,7 +233,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
err:
list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
spin_unlock_irqrestore(&common->irqlock, flags);
@@ -264,13 +268,14 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
spin_lock_irqsave(&common->irqlock, flags);
if (common->cur_frm == common->next_frm) {
- vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
} else {
if (common->cur_frm != NULL)
- vb2_buffer_done(&common->cur_frm->vb,
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
if (common->next_frm != NULL)
- vb2_buffer_done(&common->next_frm->vb,
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -278,7 +283,8 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
list_del(&common->next_frm->list);
- vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&common->irqlock, flags);
}
@@ -306,7 +312,7 @@ static void process_progressive_mode(struct common_obj *common)
spin_unlock(&common->irqlock);
/* Set top and bottom field addrs in VPIF registers */
- addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
common->set_addr(addr + common->ytop_off,
addr + common->ybtm_off,
addr + common->ctop_off,
@@ -324,10 +330,10 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
/* one frame is displayed If next frame is
* available, release cur_frm and move on */
/* Copy frame display time */
- v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.timestamp);
/* Change status of the cur_frm */
- vb2_buffer_done(&common->cur_frm->vb,
- VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
common->cur_frm = common->next_frm;
@@ -380,10 +386,10 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
if (!channel_first_int[i][channel_id]) {
/* Mark status of the cur_frm to
* done and unlock semaphore on it */
- v4l2_get_timestamp(&common->cur_frm->vb.
- v4l2_buf.timestamp);
- vb2_buffer_done(&common->cur_frm->vb,
- VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(
+ &common->cur_frm->vb.timestamp);
+ vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
common->cur_frm = common->next_frm;
}
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index 849e0e385f18..e7a1723a1b7a 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -62,7 +62,7 @@ struct video_obj {
};
struct vpif_disp_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
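
stop_streaming in these capture/display drivers now returns every buffer the driver still owns through the embedded vb2_buffer, in the ERROR state. A sketch of that cleanup loop, again reusing the hypothetical my_cap_buffer type from the first sketch:

static void my_return_all_buffers(struct list_head *dma_queue)
{
	struct my_cap_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, dma_queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
}

In the real drivers this runs under the DMA-queue spinlock and also covers cur_frm/next_frm, as the vpfe, vpbe and vpif hunks show.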
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index fa572aacdb3f..e93a2336cfa2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
@@ -136,7 +136,7 @@ struct gsc_fmt {
* @idx : index of G-Scaler input buffer
*/
struct gsc_input_buf {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
int idx;
};
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index d5cffef2e227..d82e717acba7 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -77,7 +77,7 @@ static void gsc_m2m_stop_streaming(struct vb2_queue *q)
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
if (!ctx || !ctx->m2m_ctx)
return;
@@ -86,11 +86,11 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_vb && dst_vb) {
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
- dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
- dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.flags |=
- src_vb->v4l2_buf.flags
+ dst_vb->timestamp = src_vb->timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags
& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_vb, vb_state);
@@ -109,23 +109,23 @@ static void gsc_m2m_job_abort(void *priv)
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
struct gsc_frame *s_frame, *d_frame;
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
int ret;
s_frame = &ctx->s_frame;
d_frame = &ctx->d_frame;
src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+ ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
if (ret)
return ret;
dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+ ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
if (ret)
return ret;
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->timestamp = src_vb->timestamp;
return 0;
}
@@ -212,7 +212,7 @@ put_device:
}
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
@@ -255,12 +255,13 @@ static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
static struct vb2_ops gsc_m2m_qops = {
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index cfebf292e15a..99e57320e6f7 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -24,7 +24,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "common.h"
@@ -103,7 +103,7 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
/* Release unused buffers */
while (!suspend && !list_empty(&cap->pending_buf_q)) {
buf = fimc_pending_queue_pop(cap);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
/* If suspending put unused buffers onto pending queue */
while (!list_empty(&cap->active_buf_q)) {
@@ -111,7 +111,7 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
if (suspend)
fimc_pending_queue_add(cap, buf);
else
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
fimc_hw_reset(fimc);
@@ -183,8 +183,6 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
struct v4l2_subdev *csis = p->subdevs[IDX_CSIS];
struct fimc_frame *f = &cap->ctx->d_frame;
struct fimc_vid_buffer *v_buf;
- struct timeval *tv;
- struct timespec ts;
if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
wake_up(&fimc->irq_queue);
@@ -193,16 +191,12 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
if (!list_empty(&cap->active_buf_q) &&
test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
- ktime_get_real_ts(&ts);
-
v_buf = fimc_active_queue_pop(cap);
- tv = &v_buf->vb.v4l2_buf.timestamp;
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
+ v4l2_get_timestamp(&v_buf->vb.timestamp);
+ v_buf->vb.sequence = cap->frame_count++;
- vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&v_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
if (!list_empty(&cap->pending_buf_q)) {
@@ -233,7 +227,7 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
list_for_each_entry(v_buf, &cap->active_buf_q, list) {
if (v_buf->index != index)
continue;
- vaddr = vb2_plane_vaddr(&v_buf->vb, plane);
+ vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane);
v4l2_subdev_call(csis, video, s_rx_buffer,
vaddr, &size);
break;
@@ -338,16 +332,17 @@ int fimc_capture_resume(struct fimc_dev *fimc)
if (list_empty(&vid_cap->pending_buf_q))
break;
buf = fimc_pending_queue_pop(vid_cap);
- buffer_queue(&buf->vb);
+ buffer_queue(&buf->vb.vb2_buf);
}
return 0;
}
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
+ const struct v4l2_format *pfmt = parg;
const struct v4l2_pix_format_mplane *pixm = NULL;
struct fimc_ctx *ctx = vq->drv_priv;
struct fimc_frame *frame = &ctx->d_frame;
@@ -410,8 +405,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct fimc_vid_buffer *buf
- = container_of(vb, struct fimc_vid_buffer, vb);
+ = container_of(vbuf, struct fimc_vid_buffer, vb);
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
@@ -420,7 +416,7 @@ static void buffer_queue(struct vb2_buffer *vb)
int min_bufs;
spin_lock_irqsave(&fimc->slock, flags);
- fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr);
+ fimc_prepare_addr(ctx, &buf->vb.vb2_buf, &ctx->d_frame, &buf->paddr);
if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
!test_bit(ST_CAPT_STREAM, &fimc->state) &&
@@ -1472,7 +1468,8 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
if (!list_empty(&fimc->vid_cap.active_buf_q)) {
buf = list_entry(fimc->vid_cap.active_buf_q.next,
struct fimc_vid_buffer, list);
- vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0,
+ *((u32 *)arg));
}
fimc_capture_irq_handler(fimc, 1);
fimc_deactivate_capture(fimc);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 1101c41ac117..cef2a7f07cdb 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 7328f0845065..d336fa2916df 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -22,7 +22,7 @@
#include <linux/sizes.h>
#include <media/media-entity.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
@@ -224,7 +224,7 @@ struct fimc_addr {
* @index: buffer index for the output DMA engine
*/
struct fimc_vid_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
struct fimc_addr paddr;
int index;
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index e0be691af2d3..386eb49ece7e 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -22,7 +22,7 @@
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
#include "fimc-isp.h"
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 76b6b4d14616..6e6648446f00 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -28,7 +28,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/exynos-fimc.h>
@@ -39,10 +39,11 @@
#include "fimc-is-param.h"
static int isp_video_capture_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *pfmt,
+ const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
+ const struct v4l2_format *pfmt = parg;
struct fimc_isp *isp = vb2_get_drv_priv(vq);
struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
const struct v4l2_pix_format_mplane *pixm = NULL;
@@ -194,10 +195,11 @@ static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb)
static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
struct fimc_is_video *video = &isp->video_capture;
struct fimc_is *is = fimc_isp_to_is(isp);
- struct isp_video_buf *ivb = to_isp_video_buf(vb);
+ struct isp_video_buf *ivb = to_isp_video_buf(vbuf);
unsigned long flags;
unsigned int i;
@@ -220,7 +222,7 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
isp_dbg(2, &video->ve.vdev,
"dma_buf %pad (%d/%d/%d) addr: %pad\n",
- &buf_index, ivb->index, i, vb->v4l2_buf.index,
+ &buf_index, ivb->index, i, vb->index,
&ivb->dma_addr[i]);
}
@@ -242,7 +244,7 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
void fimc_isp_video_irq_handler(struct fimc_is *is)
{
struct fimc_is_video *video = &is->isp.video_capture;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
int buf_index;
/* TODO: Ensure the DMA is really stopped in stop_streaming callback */
@@ -250,10 +252,10 @@ void fimc_isp_video_irq_handler(struct fimc_is *is)
return;
buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
- vb = &video->buffers[buf_index]->vb;
+ vbuf = &video->buffers[buf_index]->vb;
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vbuf->timestamp);
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
video->buf_mask &= ~BIT(buf_index);
fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
index 98c662654bb6..f79a1b348aa6 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
@@ -11,7 +11,7 @@
#ifndef FIMC_ISP_VIDEO__
#define FIMC_ISP_VIDEO__
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "fimc-isp.h"
#ifdef CONFIG_VIDEO_EXYNOS4_ISP_DMA_CAPTURE
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h
index b99be09b49fc..c2d25df85db9 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp.h
@@ -21,7 +21,7 @@
#include <linux/videodev2.h>
#include <media/media-entity.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include <media/exynos-fimc.h>
@@ -102,7 +102,7 @@ struct fimc_isp_ctrls {
};
struct isp_video_buf {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES];
unsigned int index;
};
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index ca6261a86a5f..60660c3a5de0 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -28,7 +28,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/exynos-fimc.h>
@@ -200,7 +200,7 @@ static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
/* Release unused buffers */
while (!suspend && !list_empty(&fimc->pending_buf_q)) {
buf = fimc_lite_pending_queue_pop(fimc);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
/* If suspending put unused buffers onto pending queue */
while (!list_empty(&fimc->active_buf_q)) {
@@ -208,7 +208,7 @@ static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
if (suspend)
fimc_lite_pending_queue_add(fimc, buf);
else
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&fimc->slock, flags);
@@ -254,8 +254,6 @@ static irqreturn_t flite_irq_handler(int irq, void *priv)
struct fimc_lite *fimc = priv;
struct flite_buffer *vbuf;
unsigned long flags;
- struct timeval *tv;
- struct timespec ts;
u32 intsrc;
spin_lock_irqsave(&fimc->slock, flags);
@@ -294,13 +292,10 @@ static irqreturn_t flite_irq_handler(int irq, void *priv)
test_bit(ST_FLITE_RUN, &fimc->state) &&
!list_empty(&fimc->active_buf_q)) {
vbuf = fimc_lite_active_queue_pop(fimc);
- ktime_get_ts(&ts);
- tv = &vbuf->vb.v4l2_buf.timestamp;
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- vbuf->vb.v4l2_buf.sequence = fimc->frame_count++;
+ v4l2_get_timestamp(&vbuf->vb.timestamp);
+ vbuf->vb.sequence = fimc->frame_count++;
flite_hw_mask_dma_buffer(fimc, vbuf->index);
- vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
if (test_bit(ST_FLITE_CONFIG, &fimc->state))
@@ -360,10 +355,11 @@ static void stop_streaming(struct vb2_queue *q)
fimc_lite_stop_capture(fimc, false);
}
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
+ const struct v4l2_format *pfmt = parg;
const struct v4l2_pix_format_mplane *pixm = NULL;
struct fimc_lite *fimc = vq->drv_priv;
struct flite_frame *frame = &fimc->out_frame;
@@ -422,8 +418,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct flite_buffer *buf
- = container_of(vb, struct flite_buffer, vb);
+ = container_of(vbuf, struct flite_buffer, vb);
struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
unsigned long flags;
@@ -1637,7 +1634,7 @@ static int fimc_lite_resume(struct device *dev)
if (list_empty(&fimc->pending_buf_q))
break;
buf = fimc_lite_pending_queue_pop(fimc);
- buffer_queue(&buf->vb);
+ buffer_queue(&buf->vb.vb2_buf);
}
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h
index ea19dc7be63e..b302305dedbe 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.h
+++ b/drivers/media/platform/exynos4-is/fimc-lite.h
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <media/media-entity.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
@@ -100,7 +100,7 @@ struct flite_frame {
* @index: DMA start address register's index
*/
struct flite_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
dma_addr_t paddr;
unsigned short index;
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index d2bfe7c2a6b4..4d1d64a46b21 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "common.h"
@@ -42,7 +42,7 @@ static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
if (!ctx || !ctx->fh.m2m_ctx)
return;
@@ -99,7 +99,7 @@ static void stop_streaming(struct vb2_queue *q)
static void fimc_device_run(void *priv)
{
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
struct fimc_ctx *ctx = priv;
struct fimc_frame *sf, *df;
struct fimc_dev *fimc;
@@ -123,19 +123,19 @@ static void fimc_device_run(void *priv)
}
src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
+ ret = fimc_prepare_addr(ctx, &src_vb->vb2_buf, sf, &sf->paddr);
if (ret)
goto dma_unlock;
dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
+ ret = fimc_prepare_addr(ctx, &dst_vb->vb2_buf, df, &df->paddr);
if (ret)
goto dma_unlock;
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
- dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.flags |=
- src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timestamp = src_vb->timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
/* Reconfigure hardware if the context has changed. */
if (fimc->m2m.ctx != ctx) {
@@ -176,7 +176,7 @@ static void fimc_job_abort(void *priv)
fimc_m2m_shutdown(priv);
}
-static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int fimc_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
@@ -220,8 +220,9 @@ static int fimc_buf_prepare(struct vb2_buffer *vb)
static void fimc_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static struct vb2_ops fimc_qops = {
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index d74e1bec3d86..4b85105dc159 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -706,7 +706,8 @@ static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
else
offset = S5PCSIS_PKTDATA_ODD;
- memcpy(pktbuf->data, state->regs + offset, pktbuf->len);
+ memcpy(pktbuf->data, (u8 __force *)state->regs + offset,
+ pktbuf->len);
pktbuf->data = NULL;
rmb();
}
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index c07f367aa436..29973f9bf8db 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -200,18 +200,18 @@ static void dma_callback(void *data)
{
struct deinterlace_ctx *curr_ctx = data;
struct deinterlace_dev *pcdev = curr_ctx->dev;
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
atomic_set(&pcdev->busy, 0);
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
- dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.flags |=
- src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+ dst_vb->timestamp = src_vb->timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timecode = src_vb->timecode;
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
@@ -225,7 +225,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
int do_callback)
{
struct deinterlace_q_data *s_q_data;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct deinterlace_dev *pcdev = ctx->dev;
struct dma_chan *chan = pcdev->dma_chan;
struct dma_device *dmadev = chan->device;
@@ -243,8 +243,9 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
s_height = s_q_data->height;
s_size = s_width * s_height;
- p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
- p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf,
+ 0);
if (!p_in || !p_out) {
v4l2_err(&pcdev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
@@ -797,7 +798,7 @@ struct vb2_dc_conf {
};
static int deinterlace_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -849,8 +850,10 @@ static int deinterlace_buf_prepare(struct vb2_buffer *vb)
static void deinterlace_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
static struct vb2_ops deinterlace_qops = {
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 5e2b4df48b3c..aa2b44041d3f 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -201,18 +201,18 @@ struct mcam_dma_desc {
/*
* Our buffer type for working with videobuf2. Note that the vb2
- * developers have decreed that struct vb2_buffer must be at the
+ * developers have decreed that struct vb2_v4l2_buffer must be at the
* beginning of this structure.
*/
struct mcam_vb_buffer {
- struct vb2_buffer vb_buf;
+ struct vb2_v4l2_buffer vb_buf;
struct list_head queue;
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
int dma_desc_nent; /* Number of mapped descriptors */
};
-static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct mcam_vb_buffer, vb_buf);
}
@@ -221,14 +221,14 @@ static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
* Hand a completed buffer back to user space.
*/
static void mcam_buffer_done(struct mcam_camera *cam, int frame,
- struct vb2_buffer *vbuf)
+ struct vb2_v4l2_buffer *vbuf)
{
- vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
- vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
- vbuf->v4l2_buf.field = V4L2_FIELD_NONE;
- v4l2_get_timestamp(&vbuf->v4l2_buf.timestamp);
- vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
- vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
+ vbuf->vb2_buf.planes[0].bytesused = cam->pix_format.sizeimage;
+ vbuf->sequence = cam->buf_seq[frame];
+ vbuf->field = V4L2_FIELD_NONE;
+ v4l2_get_timestamp(&vbuf->timestamp);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, cam->pix_format.sizeimage);
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}
@@ -482,7 +482,8 @@ static void mcam_frame_tasklet(unsigned long data)
* Drop the lock during the big copy. This *should* be safe...
*/
spin_unlock_irqrestore(&cam->dev_lock, flags);
- memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
+ memcpy(vb2_plane_vaddr(&buf->vb_buf.vb2_buf, 0),
+ cam->dma_bufs[bufno],
cam->pix_format.sizeimage);
mcam_buffer_done(cam, bufno, &buf->vb_buf);
spin_lock_irqsave(&cam->dev_lock, flags);
@@ -548,7 +549,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
{
struct mcam_vb_buffer *buf;
dma_addr_t dma_handle;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
/*
* If there are no available buffers, go into single mode
@@ -570,7 +571,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
cam->vb_bufs[frame] = buf;
vb = &buf->vb_buf;
- dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0);
+ dma_handle = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
mcam_write_yuv_bases(cam, frame, dma_handle);
}
@@ -1048,10 +1049,11 @@ static int mcam_read_setup(struct mcam_camera *cam)
*/
static int mcam_vb_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt, unsigned int *nbufs,
+ const void *parg, unsigned int *nbufs,
unsigned int *num_planes, unsigned int sizes[],
void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct mcam_camera *cam = vb2_get_drv_priv(vq);
int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
@@ -1071,7 +1073,8 @@ static int mcam_vb_queue_setup(struct vb2_queue *vq,
static void mcam_vb_buf_queue(struct vb2_buffer *vb)
{
- struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
unsigned long flags;
int start;
@@ -1096,14 +1099,14 @@ static void mcam_vb_requeue_bufs(struct vb2_queue *vq,
spin_lock_irqsave(&cam->dev_lock, flags);
list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
- vb2_buffer_done(&buf->vb_buf, state);
+ vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
list_del(&buf->queue);
}
for (i = 0; i < MAX_DMA_BUFS; i++) {
buf = cam->vb_bufs[i];
if (buf) {
- vb2_buffer_done(&buf->vb_buf, state);
+ vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
cam->vb_bufs[i] = NULL;
}
}
@@ -1198,7 +1201,8 @@ static const struct vb2_ops mcam_vb2_ops = {
*/
static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
{
- struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
@@ -1214,7 +1218,8 @@ static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
{
- struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
struct mcam_dma_desc *desc = mvb->dma_desc;
struct scatterlist *sg;
@@ -1230,8 +1235,9 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
- struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index 97167f6ffd1e..35cd9e5aedf8 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -10,7 +10,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
/*
* Create our own symbols for the supported buffer modes, but, for now,
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 87314b743f55..03a1b606655d 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -351,7 +351,7 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
{
struct emmaprp_dev *pcdev = data;
struct emmaprp_ctx *curr_ctx;
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
u32 irqst;
@@ -375,13 +375,13 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
- dst_vb->v4l2_buf.flags &=
+ dst_vb->timestamp = src_vb->timestamp;
+ dst_vb->flags &=
~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.flags |=
- src_vb->v4l2_buf.flags
+ dst_vb->flags |=
+ src_vb->flags
& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+ dst_vb->timecode = src_vb->timecode;
spin_lock_irqsave(&pcdev->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
@@ -689,7 +689,7 @@ static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
* Queue operations
*/
static int emmaprp_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -742,8 +742,9 @@ static int emmaprp_buf_prepare(struct vb2_buffer *vb)
static void emmaprp_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
static struct vb2_ops emmaprp_qops = {
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 20434e83e801..94d4c295d3d0 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -235,7 +235,7 @@ static int isp_stat_buf_queue(struct ispstat *stat)
if (!stat->active_buf)
return STAT_NO_BUF;
- ktime_get_ts(&stat->active_buf->ts);
+ v4l2_get_timestamp(&stat->active_buf->ts);
stat->active_buf->buf_size = stat->buf_size;
if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
@@ -496,8 +496,7 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
return PTR_ERR(buf);
}
- data->ts.tv_sec = buf->ts.tv_sec;
- data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
+ data->ts = buf->ts;
data->config_counter = buf->config_counter;
data->frame_number = buf->frame_number;
data->buf_size = buf->buf_size;
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index b79380d83fcf..6d9b0244f320 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -39,7 +39,7 @@ struct ispstat_buffer {
struct sg_table sgt;
void *virt_addr;
dma_addr_t dma_addr;
- struct timespec ts;
+ struct timeval ts;
u32 buf_size;
u32 frame_number;
u16 config_counter;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 41bb8df91f72..f4f591652432 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -320,7 +320,7 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
*/
static int isp_video_queue_setup(struct vb2_queue *queue,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *count, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -342,8 +342,9 @@ static int isp_video_queue_setup(struct vb2_queue *queue,
static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
- struct isp_buffer *buffer = to_isp_buffer(buf);
+ struct isp_buffer *buffer = to_isp_buffer(vbuf);
struct isp_video *video = vfh->video;
dma_addr_t addr;
@@ -363,7 +364,8 @@ static int isp_video_buffer_prepare(struct vb2_buffer *buf)
return -EINVAL;
}
- vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage);
+ vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
+ vfh->format.fmt.pix.sizeimage);
buffer->dma = addr;
return 0;
@@ -380,8 +382,9 @@ static int isp_video_buffer_prepare(struct vb2_buffer *buf)
*/
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
- struct isp_buffer *buffer = to_isp_buffer(buf);
+ struct isp_buffer *buffer = to_isp_buffer(vbuf);
struct isp_video *video = vfh->video;
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
enum isp_pipeline_state state;
@@ -392,7 +395,7 @@ static void isp_video_buffer_queue(struct vb2_buffer *buf)
spin_lock_irqsave(&video->irqlock, flags);
if (unlikely(video->error)) {
- vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&video->irqlock, flags);
return;
}
@@ -464,7 +467,7 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
list_del(&buf->irqlist);
spin_unlock_irqrestore(&video->irqlock, flags);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&buf->vb.timestamp);
/* Do frame number propagation only if this is the output video node.
* Frame number either comes from the CSI receivers or it gets
@@ -473,15 +476,15 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
* first, so the input number might lag behind by 1 in some cases.
*/
if (video == pipe->output && !pipe->do_propagation)
- buf->vb.v4l2_buf.sequence =
+ buf->vb.sequence =
atomic_inc_return(&pipe->frame_number);
else
- buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+ buf->vb.sequence = atomic_read(&pipe->frame_number);
if (pipe->field != V4L2_FIELD_NONE)
- buf->vb.v4l2_buf.sequence /= 2;
+ buf->vb.sequence /= 2;
- buf->vb.v4l2_buf.field = pipe->field;
+ buf->vb.field = pipe->field;
/* Report pipeline errors to userspace on the capture device side. */
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
@@ -491,7 +494,7 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
state = VB2_BUF_STATE_DONE;
}
- vb2_buffer_done(&buf->vb, state);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
spin_lock_irqsave(&video->irqlock, flags);
@@ -546,7 +549,7 @@ void omap3isp_video_cancel_stream(struct isp_video *video)
buf = list_first_entry(&video->dmaqueue,
struct isp_buffer, irqlist);
list_del(&buf->irqlist);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
video->error = true;
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 4071dd7060ea..bcf0e0acc8f3 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -20,7 +20,7 @@
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#define ISP_VIDEO_DRIVER_NAME "ispvideo"
#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
@@ -122,7 +122,7 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
* @dma: DMA address
*/
struct isp_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head irqlist;
dma_addr_t dma;
};
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 2973f070d328..f8e3e83c52a2 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -37,7 +37,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
@@ -471,7 +471,7 @@ static const char *error_to_text[16] = {
"Unknown"
};
-static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_buffer *vb)
+static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
{
struct v4l2_m2m_buffer *b =
container_of(vb, struct v4l2_m2m_buffer, vb);
@@ -1015,10 +1015,11 @@ error_free:
* ============================================================================
*/
static int jpu_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
struct jpu_q_data *q_data;
unsigned int i;
@@ -1044,6 +1045,7 @@ static int jpu_queue_setup(struct vb2_queue *vq,
static int jpu_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct jpu_q_data *q_data;
unsigned int i;
@@ -1051,9 +1053,9 @@ static int jpu_buf_prepare(struct vb2_buffer *vb)
q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
- if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
- vb->v4l2_buf.field = V4L2_FIELD_NONE;
- if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
dev_err(ctx->jpu->dev, "%s field isn't supported\n",
__func__);
return -EINVAL;
@@ -1080,10 +1082,11 @@ static int jpu_buf_prepare(struct vb2_buffer *vb)
static void jpu_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
- struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
struct jpu_q_data *q_data, adjust;
void *buffer = vb2_plane_vaddr(vb, 0);
unsigned long buf_size = vb2_get_plane_payload(vb, 0);
@@ -1117,7 +1120,7 @@ static void jpu_buf_queue(struct vb2_buffer *vb)
}
if (ctx->fh.m2m_ctx)
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
return;
@@ -1128,14 +1131,15 @@ format_error:
static void jpu_buf_finish(struct vb2_buffer *vb)
{
- struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct jpu_q_data *q_data = &ctx->out_q;
enum v4l2_buf_type type = vb->vb2_queue->type;
u8 *buffer;
if (vb->state == VB2_BUF_STATE_DONE)
- vb->v4l2_buf.sequence = jpu_get_q_data(ctx, type)->sequence++;
+ vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;
if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
V4L2_TYPE_IS_OUTPUT(type))
@@ -1144,9 +1148,9 @@ static void jpu_buf_finish(struct vb2_buffer *vb)
buffer = vb2_plane_vaddr(vb, 0);
memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
- *(u16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
+ *(__be16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
cpu_to_be16(q_data->format.height);
- *(u16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
+ *(__be16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
cpu_to_be16(q_data->format.width);
*(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
}
@@ -1163,7 +1167,7 @@ static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
static void jpu_stop_streaming(struct vb2_queue *vq)
{
struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
unsigned long flags;
for (;;) {
@@ -1327,7 +1331,7 @@ static const struct v4l2_file_operations jpu_fops = {
static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
{
/* remove current buffers and finish job */
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long flags;
spin_lock_irqsave(&ctx->jpu->lock, flags);
@@ -1353,7 +1357,7 @@ static void jpu_device_run(void *priv)
struct jpu *jpu = ctx->jpu;
struct jpu_buffer *jpu_buf;
struct jpu_q_data *q_data;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned int w, h, bpl;
unsigned char num_planes, subsampling;
unsigned long flags;
@@ -1389,10 +1393,12 @@ static void jpu_device_run(void *priv)
unsigned long src_1_addr, src_2_addr, dst_addr;
unsigned int redu, inft;
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- src_1_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ src_1_addr =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
if (num_planes > 1)
- src_2_addr = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src_2_addr = vb2_dma_contig_plane_dma_addr(
+ &src_buf->vb2_buf, 1);
else
src_2_addr = src_1_addr + w * h;
@@ -1453,10 +1459,12 @@ static void jpu_device_run(void *priv)
return;
}
- src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- dst_1_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ dst_1_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
if (q_data->fmtinfo->num_planes > 1)
- dst_2_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ dst_2_addr = vb2_dma_contig_plane_dma_addr(
+ &dst_buf->vb2_buf, 1);
else
dst_2_addr = dst_1_addr + w * h;
@@ -1511,7 +1519,7 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
{
struct jpu *jpu = dev_id;
struct jpu_ctx *curr_ctx;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned int int_status;
int_status = jpu_read(jpu, JINTS);
@@ -1547,18 +1555,18 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
| jpu_read(jpu, JCDTCM) << 8
| jpu_read(jpu, JCDTCD);
- vb2_set_plane_payload(dst_buf, 0,
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
payload_size + JPU_JPEG_HDR_SIZE);
}
- dst_buf->v4l2_buf.field = src_buf->v4l2_buf.field;
- dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
- if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
- dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.flags |= src_buf->v4l2_buf.flags &
+ dst_buf->field = src_buf->field;
+ dst_buf->timestamp = src_buf->timestamp;
+ if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE)
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->flags |= src_buf->flags &
V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.flags = src_buf->v4l2_buf.flags &
+ dst_buf->flags = src_buf->flags &
(V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 76e6289a5612..537b858cb94a 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -34,7 +34,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "camif-core.h"
@@ -164,12 +164,12 @@ static int camif_reinitialize(struct camif_vp *vp)
/* Release unused buffers */
while (!list_empty(&vp->pending_buf_q)) {
buf = camif_pending_queue_pop(vp);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
while (!list_empty(&vp->active_buf_q)) {
buf = camif_active_queue_pop(vp);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&camif->slock, flags);
@@ -328,25 +328,19 @@ irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
!list_empty(&vp->active_buf_q)) {
unsigned int index;
struct camif_buffer *vbuf;
- struct timeval *tv;
- struct timespec ts;
/*
* Get previous DMA write buffer index:
* 0 => DMA buffer 0, 2;
* 1 => DMA buffer 1, 3.
*/
index = (CISTATUS_FRAMECNT(status) + 2) & 1;
-
- ktime_get_ts(&ts);
vbuf = camif_active_queue_peek(vp, index);
if (!WARN_ON(vbuf == NULL)) {
/* Dequeue a filled buffer */
- tv = &vbuf->vb.v4l2_buf.timestamp;
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- vbuf->vb.v4l2_buf.sequence = vp->frame_sequence++;
- vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vbuf->vb.timestamp);
+ vbuf->vb.sequence = vp->frame_sequence++;
+ vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
/* Set up an empty buffer at the DMA engine */
vbuf = camif_pending_queue_pop(vp);
@@ -441,10 +435,11 @@ static void stop_streaming(struct vb2_queue *vq)
camif_stop_capture(vp);
}
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
+ const struct v4l2_format *pfmt = parg;
const struct v4l2_pix_format *pix = NULL;
struct camif_vp *vp = vb2_get_drv_priv(vq);
struct camif_dev *camif = vp->camif;
@@ -496,13 +491,14 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
- struct camif_buffer *buf = container_of(vb, struct camif_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct camif_buffer *buf = container_of(vbuf, struct camif_buffer, vb);
struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
struct camif_dev *camif = vp->camif;
unsigned long flags;
spin_lock_irqsave(&camif->slock, flags);
- WARN_ON(camif_prepare_addr(vp, &buf->vb, &buf->paddr));
+ WARN_ON(camif_prepare_addr(vp, &buf->vb.vb2_buf, &buf->paddr));
if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
/* Schedule an empty buffer in H/W */
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index f47b332f0418..1ba9bb08f5da 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -32,7 +32,7 @@
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "camif-core.h"
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index 35d2fcdc0036..adaf1969ef63 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -25,7 +25,7 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/s3c_camif.h>
#define S3C_CAMIF_DRIVER_NAME "s3c-camif"
@@ -322,7 +322,7 @@ struct camif_addr {
* @index: an identifier of this buffer at the DMA engine
*/
struct camif_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
struct camif_addr paddr;
unsigned int index;
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 421a7c3b595b..e1936d9d27da 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -23,7 +23,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "g2d.h"
@@ -101,7 +101,7 @@ static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
}
}
-static int g2d_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int g2d_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -134,8 +134,9 @@ static int g2d_buf_prepare(struct vb2_buffer *vb)
static void g2d_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static struct vb2_ops g2d_qops = {
@@ -537,7 +538,7 @@ static irqreturn_t g2d_isr(int irq, void *prv)
{
struct g2d_dev *dev = prv;
struct g2d_ctx *ctx = dev->curr;
- struct vb2_buffer *src, *dst;
+ struct vb2_v4l2_buffer *src, *dst;
g2d_clear_int(dev);
clk_disable(dev->gate);
@@ -550,11 +551,11 @@ static irqreturn_t g2d_isr(int irq, void *prv)
BUG_ON(src == NULL);
BUG_ON(dst == NULL);
- dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
- dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
- dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst->v4l2_buf.flags |=
- src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->timecode = src->timecode;
+ dst->timestamp = src->timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |=
+ src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 9690f9dcb0ca..4a608cbe0fdb 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -26,7 +26,7 @@
#include <linux/string.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "jpeg-core.h"
@@ -626,6 +626,7 @@ static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
return exynos3250_decoded_subsampling[ctx->subsampling];
case SJPEG_EXYNOS4:
+ case SJPEG_EXYNOS5433:
if (ctx->subsampling > 2)
return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
return exynos4x12_decoded_subsampling[ctx->subsampling];
@@ -750,6 +751,208 @@ static void exynos4_jpeg_set_huff_tbl(void __iomem *base)
ARRAY_SIZE(hactblg0));
}
+static inline int __exynos4_huff_tbl(int class, int id, bool lenval)
+{
+ /*
+ * class: 0 - DC, 1 - AC
+ * id: 0 - Y, 1 - Cb/Cr
+ */
+ if (class) {
+ if (id)
+ return lenval ? EXYNOS4_HUFF_TBL_HACCL :
+ EXYNOS4_HUFF_TBL_HACCV;
+ return lenval ? EXYNOS4_HUFF_TBL_HACLL : EXYNOS4_HUFF_TBL_HACLV;
+
+ }
+ /* class == 0 */
+ if (id)
+ return lenval ? EXYNOS4_HUFF_TBL_HDCCL : EXYNOS4_HUFF_TBL_HDCCV;
+
+ return lenval ? EXYNOS4_HUFF_TBL_HDCLL : EXYNOS4_HUFF_TBL_HDCLV;
+}
+
+static inline int exynos4_huff_tbl_len(int class, int id)
+{
+ return __exynos4_huff_tbl(class, id, true);
+}
+
+static inline int exynos4_huff_tbl_val(int class, int id)
+{
+ return __exynos4_huff_tbl(class, id, false);
+}
+
+static int get_byte(struct s5p_jpeg_buffer *buf);
+static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word);
+static void skip(struct s5p_jpeg_buffer *buf, long len);
+
+static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ int c, x, components;
+
+ jpeg_buffer.size = 2; /* Ls */
+ jpeg_buffer.data =
+ (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
+ jpeg_buffer.curr = 0;
+
+ word = 0;
+
+ if (get_word_be(&jpeg_buffer, &word))
+ return;
+ jpeg_buffer.size = (long)word - 2;
+ jpeg_buffer.data += 2;
+ jpeg_buffer.curr = 0;
+
+ components = get_byte(&jpeg_buffer);
+ if (components == -1)
+ return;
+ while (components--) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ x = get_byte(&jpeg_buffer);
+ if (x == -1)
+ return;
+ exynos4_jpeg_select_dec_h_tbl(jpeg->regs, c,
+ (((x >> 4) & 0x1) << 1) | (x & 0x1));
+ }
+
+}
+
+static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ int c, i, n, j;
+
+ for (j = 0; j < ctx->out_q.dht.n; ++j) {
+ jpeg_buffer.size = ctx->out_q.dht.len[j];
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+ ctx->out_q.dht.marker[j];
+ jpeg_buffer.curr = 0;
+
+ word = 0;
+ while (jpeg_buffer.curr < jpeg_buffer.size) {
+ char id, class;
+
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ id = c & 0xf;
+ class = (c >> 4) & 0xf;
+ n = 0;
+ for (i = 0; i < 16; ++i) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ word |= c << ((i % 4) * 8);
+ if ((i + 1) % 4 == 0) {
+ writel(word, jpeg->regs +
+ exynos4_huff_tbl_len(class, id) +
+ (i / 4) * 4);
+ word = 0;
+ }
+ n += c;
+ }
+ word = 0;
+ for (i = 0; i < n; ++i) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ word |= c << ((i % 4) * 8);
+ if ((i + 1) % 4 == 0) {
+ writel(word, jpeg->regs +
+ exynos4_huff_tbl_val(class, id) +
+ (i / 4) * 4);
+ word = 0;
+ }
+ }
+ if (i % 4) {
+ writel(word, jpeg->regs +
+ exynos4_huff_tbl_val(class, id) + (i / 4) * 4);
+ }
+ word = 0;
+ }
+ }
+}
+
+static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ int c, x, components;
+
+ jpeg_buffer.size = ctx->out_q.sof_len;
+ jpeg_buffer.data =
+ (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
+ jpeg_buffer.curr = 0;
+
+ skip(&jpeg_buffer, 5); /* P, Y, X */
+ components = get_byte(&jpeg_buffer);
+ if (components == -1)
+ return;
+
+ exynos4_jpeg_set_dec_components(jpeg->regs, components);
+
+ while (components--) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ skip(&jpeg_buffer, 1);
+ x = get_byte(&jpeg_buffer);
+ if (x == -1)
+ return;
+ exynos4_jpeg_select_dec_q_tbl(jpeg->regs, c, x);
+ }
+}
+
+static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct s5p_jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ int c, i, j;
+
+ for (j = 0; j < ctx->out_q.dqt.n; ++j) {
+ jpeg_buffer.size = ctx->out_q.dqt.len[j];
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+ ctx->out_q.dqt.marker[j];
+ jpeg_buffer.curr = 0;
+
+ word = 0;
+ while (jpeg_buffer.size - jpeg_buffer.curr >= 65) {
+ char id;
+
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ id = c & 0xf;
+ /* nonzero means extended mode - not supported */
+ if ((c >> 4) & 0xf)
+ return;
+ for (i = 0; i < 64; ++i) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ return;
+ word |= c << ((i % 4) * 8);
+ if ((i + 1) % 4 == 0) {
+ writel(word, jpeg->regs +
+ EXYNOS4_QTBL_CONTENT(id) + (i / 4) * 4);
+ word = 0;
+ }
+ }
+ word = 0;
+ }
+ }
+}
+
/*
* ============================================================================
* Device file operations
@@ -894,8 +1097,11 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
unsigned long buffer, unsigned long size,
struct s5p_jpeg_ctx *ctx)
{
- int c, components = 0, notfound;
- unsigned int height, width, word, subsampling = 0;
+ int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
+ unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0,
+ sof_len = 0;
+ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER],
+ dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
long length;
struct s5p_jpeg_buffer jpeg_buffer;
@@ -904,7 +1110,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
jpeg_buffer.curr = 0;
notfound = 1;
- while (notfound) {
+ while (notfound || !sos) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return false;
@@ -923,6 +1129,11 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
case SOF0:
if (get_word_be(&jpeg_buffer, &word))
break;
+ length = (long)word - 2;
+ if (!length)
+ return false;
+ sof = jpeg_buffer.curr; /* after 0xffc0 */
+ sof_len = length;
if (get_byte(&jpeg_buffer) == -1)
break;
if (get_word_be(&jpeg_buffer, &height))
@@ -932,7 +1143,6 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
components = get_byte(&jpeg_buffer);
if (components == -1)
break;
- notfound = 0;
if (components == 1) {
subsampling = 0x33;
@@ -941,8 +1151,40 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
subsampling = get_byte(&jpeg_buffer);
skip(&jpeg_buffer, 1);
}
-
+ if (components > 3)
+ return false;
skip(&jpeg_buffer, components * 2);
+ notfound = 0;
+ break;
+
+ case DQT:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ if (!length)
+ return false;
+ if (n_dqt >= S5P_JPEG_MAX_MARKER)
+ return false;
+ dqt[n_dqt] = jpeg_buffer.curr; /* after 0xffdb */
+ dqt_len[n_dqt++] = length;
+ skip(&jpeg_buffer, length);
+ break;
+
+ case DHT:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ if (!length)
+ return false;
+ if (n_dht >= S5P_JPEG_MAX_MARKER)
+ return false;
+ dht[n_dht] = jpeg_buffer.curr; /* after 0xffc4 */
+ dht_len[n_dht++] = length;
+ skip(&jpeg_buffer, length);
+ break;
+
+ case SOS:
+ sos = jpeg_buffer.curr - 2; /* 0xffda */
break;
/* skip payload-less markers */
@@ -963,7 +1205,20 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
}
result->w = width;
result->h = height;
- result->size = components;
+ result->sos = sos;
+ result->dht.n = n_dht;
+ while (n_dht--) {
+ result->dht.marker[n_dht] = dht[n_dht];
+ result->dht.len[n_dht] = dht_len[n_dht];
+ }
+ result->dqt.n = n_dqt;
+ while (n_dqt--) {
+ result->dqt.marker[n_dqt] = dqt[n_dqt];
+ result->dqt.len[n_dqt] = dqt_len[n_dqt];
+ }
+ result->sof = sof;
+ result->sof_len = sof_len;
+ result->size = result->components = components;
switch (subsampling) {
case 0x11:
@@ -982,7 +1237,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
return false;
}
- return !notfound;
+ return !notfound && sos;
}
static int s5p_jpeg_querycap(struct file *file, void *priv,
@@ -1226,8 +1481,7 @@ static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- if ((ctx->jpeg->variant->version != SJPEG_EXYNOS4) ||
- (ctx->mode != S5P_JPEG_DECODE))
+ if (!ctx->jpeg->variant->hw_ex4_compat || ctx->mode != S5P_JPEG_DECODE)
goto exit;
/*
@@ -1350,7 +1604,7 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
* the JPEG_IMAGE_SIZE register. In order to avoid sysmmu
* page fault calculate proper buffer size in such a case.
*/
- if (ct->jpeg->variant->version == SJPEG_EXYNOS4 &&
+ if (ct->jpeg->variant->hw_ex4_compat &&
f_type == FMT_TYPE_OUTPUT && ct->mode == S5P_JPEG_ENCODE)
q_data->size = exynos4_jpeg_get_output_buffer_size(ct,
f,
@@ -1889,9 +2143,36 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
+ ctx->mode == S5P_JPEG_DECODE)
+ jpeg_addr += ctx->out_q.sos;
exynos4_jpeg_set_stream_buf_address(jpeg->regs, jpeg_addr);
}
+static inline void exynos4_jpeg_set_img_fmt(void __iomem *base,
+ unsigned int img_fmt)
+{
+ __exynos4_jpeg_set_img_fmt(base, img_fmt, SJPEG_EXYNOS4);
+}
+
+static inline void exynos5433_jpeg_set_img_fmt(void __iomem *base,
+ unsigned int img_fmt)
+{
+ __exynos4_jpeg_set_img_fmt(base, img_fmt, SJPEG_EXYNOS5433);
+}
+
+static inline void exynos4_jpeg_set_enc_out_fmt(void __iomem *base,
+ unsigned int out_fmt)
+{
+ __exynos4_jpeg_set_enc_out_fmt(base, out_fmt, SJPEG_EXYNOS4);
+}
+
+static inline void exynos5433_jpeg_set_enc_out_fmt(void __iomem *base,
+ unsigned int out_fmt)
+{
+ __exynos4_jpeg_set_enc_out_fmt(base, out_fmt, SJPEG_EXYNOS5433);
+}
+
static void exynos4_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
@@ -1899,11 +2180,11 @@ static void exynos4_jpeg_device_run(void *priv)
unsigned int bitstream_size;
unsigned long flags;
- spin_lock_irqsave(&ctx->jpeg->slock, flags);
+ spin_lock_irqsave(&jpeg->slock, flags);
if (ctx->mode == S5P_JPEG_ENCODE) {
exynos4_jpeg_sw_reset(jpeg->regs);
- exynos4_jpeg_set_interrupt(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs, jpeg->variant->version);
exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
exynos4_jpeg_set_huff_tbl(jpeg->regs);
@@ -1920,27 +2201,56 @@ static void exynos4_jpeg_device_run(void *priv)
exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
ctx->cap_q.h);
- exynos4_jpeg_set_enc_out_fmt(jpeg->regs, ctx->subsampling);
- exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->out_q.fmt->fourcc);
+ if (ctx->jpeg->variant->version == SJPEG_EXYNOS4) {
+ exynos4_jpeg_set_enc_out_fmt(jpeg->regs,
+ ctx->subsampling);
+ exynos4_jpeg_set_img_fmt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ } else {
+ exynos5433_jpeg_set_enc_out_fmt(jpeg->regs,
+ ctx->subsampling);
+ exynos5433_jpeg_set_img_fmt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ }
exynos4_jpeg_set_img_addr(ctx);
exynos4_jpeg_set_jpeg_addr(ctx);
exynos4_jpeg_set_encode_hoff_cnt(jpeg->regs,
ctx->out_q.fmt->fourcc);
} else {
exynos4_jpeg_sw_reset(jpeg->regs);
- exynos4_jpeg_set_interrupt(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs,
+ jpeg->variant->version);
exynos4_jpeg_set_img_addr(ctx);
exynos4_jpeg_set_jpeg_addr(ctx);
- exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->cap_q.fmt->fourcc);
- bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
+ if (jpeg->variant->version == SJPEG_EXYNOS5433) {
+ exynos4_jpeg_parse_huff_tbl(ctx);
+ exynos4_jpeg_parse_decode_h_tbl(ctx);
+
+ exynos4_jpeg_parse_q_tbl(ctx);
+ exynos4_jpeg_parse_decode_q_tbl(ctx);
+
+ exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
+
+ exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
+ ctx->cap_q.h);
+ exynos5433_jpeg_set_enc_out_fmt(jpeg->regs,
+ ctx->subsampling);
+ exynos5433_jpeg_set_img_fmt(jpeg->regs,
+ ctx->cap_q.fmt->fourcc);
+ bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 16);
+ } else {
+ exynos4_jpeg_set_img_fmt(jpeg->regs,
+ ctx->cap_q.fmt->fourcc);
+ bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
+ }
exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size);
}
exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode);
- spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+ spin_unlock_irqrestore(&jpeg->slock, flags);
}
static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
@@ -2120,7 +2430,7 @@ static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
*/
static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -2170,6 +2480,7 @@ static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
if (ctx->mode == S5P_JPEG_DECODE &&
@@ -2187,13 +2498,24 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
q_data = &ctx->out_q;
q_data->w = tmp.w;
q_data->h = tmp.h;
+ q_data->sos = tmp.sos;
+ memcpy(q_data->dht.marker, tmp.dht.marker,
+ sizeof(tmp.dht.marker));
+ memcpy(q_data->dht.len, tmp.dht.len, sizeof(tmp.dht.len));
+ q_data->dht.n = tmp.dht.n;
+ memcpy(q_data->dqt.marker, tmp.dqt.marker,
+ sizeof(tmp.dqt.marker));
+ memcpy(q_data->dqt.len, tmp.dqt.len, sizeof(tmp.dqt.len));
+ q_data->dqt.n = tmp.dqt.n;
+ q_data->sof = tmp.sof;
+ q_data->sof_len = tmp.sof_len;
q_data = &ctx->cap_q;
q_data->w = tmp.w;
q_data->h = tmp.h;
}
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -2264,7 +2586,7 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
{
struct s5p_jpeg *jpeg = dev_id;
struct s5p_jpeg_ctx *curr_ctx;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long payload_size = 0;
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
bool enc_jpeg_too_large = false;
@@ -2298,15 +2620,15 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
payload_size = s5p_jpeg_compressed_size(jpeg->regs);
}
- dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
- dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.flags |=
- src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->flags |=
+ src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_buf, state);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
- vb2_set_plane_payload(dst_buf, 0, payload_size);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
@@ -2321,7 +2643,7 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
{
unsigned int int_status;
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
struct s5p_jpeg *jpeg = priv;
struct s5p_jpeg_ctx *curr_ctx;
unsigned long payload_size = 0;
@@ -2363,7 +2685,8 @@ static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
if (jpeg->irq_ret == OK_ENC_OR_DEC) {
if (curr_ctx->mode == S5P_JPEG_ENCODE) {
payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
- vb2_set_plane_payload(dst_vb, 0, payload_size);
+ vb2_set_plane_payload(&dst_vb->vb2_buf,
+ 0, payload_size);
}
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
@@ -2373,7 +2696,8 @@ static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
}
v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
- curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
+ if (jpeg->variant->version == SJPEG_EXYNOS4)
+ curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
spin_unlock(&jpeg->slock);
return IRQ_HANDLED;
@@ -2383,7 +2707,7 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
{
struct s5p_jpeg *jpeg = dev_id;
struct s5p_jpeg_ctx *curr_ctx;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long payload_size = 0;
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
bool interrupt_timeout = false;
@@ -2427,12 +2751,12 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
- dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
- dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->timestamp = src_buf->timestamp;
v4l2_m2m_buf_done(src_buf, state);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
- vb2_set_plane_payload(dst_buf, 0, payload_size);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
@@ -2455,7 +2779,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
struct resource *res;
- int ret;
+ int i, ret;
/* JPEG IP abstraction struct */
jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
@@ -2490,23 +2814,21 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
}
/* clocks */
- jpeg->clk = clk_get(&pdev->dev, "jpeg");
- if (IS_ERR(jpeg->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(jpeg->clk);
- return ret;
+ for (i = 0; i < jpeg->variant->num_clocks; i++) {
+ jpeg->clocks[i] = devm_clk_get(&pdev->dev,
+ jpeg->variant->clk_names[i]);
+ if (IS_ERR(jpeg->clocks[i])) {
+ dev_err(&pdev->dev, "failed to get clock: %s\n",
+ jpeg->variant->clk_names[i]);
+ return PTR_ERR(jpeg->clocks[i]);
+ }
}
- dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
-
- jpeg->sclk = clk_get(&pdev->dev, "sclk");
- if (IS_ERR(jpeg->sclk))
- dev_info(&pdev->dev, "sclk clock not available\n");
/* v4l2 device */
ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to register v4l2 device\n");
- goto clk_get_rollback;
+ return ret;
}
/* mem2mem device */
@@ -2603,17 +2925,13 @@ m2m_init_rollback:
device_register_rollback:
v4l2_device_unregister(&jpeg->v4l2_dev);
-clk_get_rollback:
- clk_put(jpeg->clk);
- if (!IS_ERR(jpeg->sclk))
- clk_put(jpeg->sclk);
-
return ret;
}
static int s5p_jpeg_remove(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg = platform_get_drvdata(pdev);
+ int i;
pm_runtime_disable(jpeg->dev);
@@ -2624,15 +2942,10 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
v4l2_device_unregister(&jpeg->v4l2_dev);
if (!pm_runtime_status_suspended(&pdev->dev)) {
- clk_disable_unprepare(jpeg->clk);
- if (!IS_ERR(jpeg->sclk))
- clk_disable_unprepare(jpeg->sclk);
+ for (i = jpeg->variant->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(jpeg->clocks[i]);
}
- clk_put(jpeg->clk);
- if (!IS_ERR(jpeg->sclk))
- clk_put(jpeg->sclk);
-
return 0;
}
@@ -2640,10 +2953,10 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
static int s5p_jpeg_runtime_suspend(struct device *dev)
{
struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+ int i;
- clk_disable_unprepare(jpeg->clk);
- if (!IS_ERR(jpeg->sclk))
- clk_disable_unprepare(jpeg->sclk);
+ for (i = jpeg->variant->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(jpeg->clocks[i]);
return 0;
}
@@ -2652,16 +2965,15 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
{
struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
unsigned long flags;
- int ret;
+ int i, ret;
- ret = clk_prepare_enable(jpeg->clk);
- if (ret < 0)
- return ret;
-
- if (!IS_ERR(jpeg->sclk)) {
- ret = clk_prepare_enable(jpeg->sclk);
- if (ret < 0)
+ for (i = 0; i < jpeg->variant->num_clocks; i++) {
+ ret = clk_prepare_enable(jpeg->clocks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(jpeg->clocks[i]);
return ret;
+ }
}
spin_lock_irqsave(&jpeg->slock, flags);
@@ -2715,6 +3027,8 @@ static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
.jpeg_irq = s5p_jpeg_irq,
.m2m_ops = &s5p_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_S5P,
+ .clk_names = {"jpeg"},
+ .num_clocks = 1,
};
static struct s5p_jpeg_variant exynos3250_jpeg_drvdata = {
@@ -2723,6 +3037,8 @@ static struct s5p_jpeg_variant exynos3250_jpeg_drvdata = {
.m2m_ops = &exynos3250_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250,
.hw3250_compat = 1,
+ .clk_names = {"jpeg", "sclk"},
+ .num_clocks = 2,
};
static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
@@ -2731,6 +3047,9 @@ static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
.m2m_ops = &exynos4_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
.htbl_reinit = 1,
+ .clk_names = {"jpeg"},
+ .num_clocks = 1,
+ .hw_ex4_compat = 1,
};
static struct s5p_jpeg_variant exynos5420_jpeg_drvdata = {
@@ -2740,6 +3059,19 @@ static struct s5p_jpeg_variant exynos5420_jpeg_drvdata = {
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250, /* intentionally 3250 */
.hw3250_compat = 1,
.htbl_reinit = 1,
+ .clk_names = {"jpeg"},
+ .num_clocks = 1,
+};
+
+static struct s5p_jpeg_variant exynos5433_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS5433,
+ .jpeg_irq = exynos4_jpeg_irq,
+ .m2m_ops = &exynos4_jpeg_m2m_ops,
+ .fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
+ .htbl_reinit = 1,
+ .clk_names = {"pclk", "aclk", "aclk_xiu", "sclk"},
+ .num_clocks = 4,
+ .hw_ex4_compat = 1,
};
static const struct of_device_id samsung_jpeg_match[] = {
@@ -2758,6 +3090,9 @@ static const struct of_device_id samsung_jpeg_match[] = {
}, {
.compatible = "samsung,exynos5420-jpeg",
.data = &exynos5420_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos5433-jpeg",
+ .data = &exynos5433_jpeg_drvdata,
},
{},
};
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 7d9a9ed19cea..9b1db0934909 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -20,6 +20,8 @@
#define S5P_JPEG_M2M_NAME "s5p-jpeg"
+#define JPEG_MAX_CLOCKS 4
+
/* JPEG compression quality setting */
#define S5P_JPEG_COMPR_QUAL_BEST 0
#define S5P_JPEG_COMPR_QUAL_WORST 3
@@ -40,9 +42,12 @@
/* a selection of JPEG markers */
#define TEM 0x01
#define SOF0 0xc0
+#define DHT 0xc4
#define RST 0xd0
#define SOI 0xd8
#define EOI 0xd9
+#define SOS 0xda
+#define DQT 0xdb
#define DHP 0xde
/* Flags that indicate a format can be used for capture/output */
@@ -66,12 +71,15 @@
#define SJPEG_SUBSAMPLING_422 0x21
#define SJPEG_SUBSAMPLING_420 0x22
+#define S5P_JPEG_MAX_MARKER 4
+
/* Version numbers */
enum sjpeg_version {
SJPEG_S5P,
SJPEG_EXYNOS3250,
SJPEG_EXYNOS4,
SJPEG_EXYNOS5420,
+ SJPEG_EXYNOS5433,
};
enum exynos4_jpeg_result {
@@ -100,8 +108,7 @@ enum exynos4_jpeg_img_quality_level {
* @m2m_dev: v4l2 mem2mem device data
* @regs: JPEG IP registers mapping
* @irq: JPEG IP irq
- * @clk: JPEG IP clock
- * @sclk: Exynos3250 JPEG IP special clock
+ * @clocks: JPEG IP clock(s)
* @dev: JPEG IP struct device
* @alloc_ctx: videobuf2 memory allocator's context
* @variant: driver variant to be used
@@ -121,8 +128,7 @@ struct s5p_jpeg {
void __iomem *regs;
unsigned int irq;
enum exynos4_jpeg_result irq_ret;
- struct clk *clk;
- struct clk *sclk;
+ struct clk *clocks[JPEG_MAX_CLOCKS];
struct device *dev;
void *alloc_ctx;
struct s5p_jpeg_variant *variant;
@@ -134,8 +140,11 @@ struct s5p_jpeg_variant {
unsigned int fmt_ver_flag;
unsigned int hw3250_compat:1;
unsigned int htbl_reinit:1;
+ unsigned int hw_ex4_compat:1;
struct v4l2_m2m_ops *m2m_ops;
irqreturn_t (*jpeg_irq)(int irq, void *priv);
+ const char *clk_names[JPEG_MAX_CLOCKS];
+ int num_clocks;
};
/**
@@ -161,16 +170,40 @@ struct s5p_jpeg_fmt {
};
/**
+ * s5p_jpeg_marker - collection of markers from jpeg header
+ * @marker: markers' positions relative to the buffer beginning
+ * @len: markers' payload lengths (without length field)
+ * @n: number of markers in collection
+ */
+struct s5p_jpeg_marker {
+ u32 marker[S5P_JPEG_MAX_MARKER];
+ u32 len[S5P_JPEG_MAX_MARKER];
+ u32 n;
+};
+
+/**
* s5p_jpeg_q_data - parameters of one queue
* @fmt: driver-specific format of this queue
* @w: image width
* @h: image height
+ * @sos: SOS marker's position relative to the buffer beginning
+ * @dht: DHT markers' positions relative to the buffer beginning
+ * @dqt: DQT markers' positions relative to the buffer beginning
+ * @sof: SOF0 marker's position relative to the buffer beginning
+ * @sof_len: SOF0 marker's payload length (without length field itself)
+ * @components: number of image components
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
struct s5p_jpeg_fmt *fmt;
u32 w;
u32 h;
+ u32 sos;
+ struct s5p_jpeg_marker dht;
+ struct s5p_jpeg_marker dqt;
+ u32 sof;
+ u32 sof_len;
+ u32 components;
u32 size;
};
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
index ab6d6f43c96f..0912d0a892e2 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -45,9 +45,20 @@ void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode)
}
}
-void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt)
+void __exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt,
+ unsigned int version)
{
unsigned int reg;
+ unsigned int exynos4_swap_chroma_cbcr;
+ unsigned int exynos4_swap_chroma_crcb;
+
+ if (version == SJPEG_EXYNOS4) {
+ exynos4_swap_chroma_cbcr = EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_crcb = EXYNOS4_SWAP_CHROMA_CRCB;
+ } else {
+ exynos4_swap_chroma_cbcr = EXYNOS5433_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_crcb = EXYNOS5433_SWAP_CHROMA_CRCB;
+ }
reg = readl(base + EXYNOS4_IMG_FMT_REG) &
EXYNOS4_ENC_IN_FMT_MASK; /* clear except enc format */
@@ -67,48 +78,48 @@ void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt)
case V4L2_PIX_FMT_NV24:
reg = reg | EXYNOS4_ENC_YUV_444_IMG |
EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
- EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_NV42:
reg = reg | EXYNOS4_ENC_YUV_444_IMG |
EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
- EXYNOS4_SWAP_CHROMA_CRCB;
+ exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_YUYV:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
- EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_YVYU:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
- EXYNOS4_SWAP_CHROMA_CRCB;
+ exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_NV16:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
- EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_NV61:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
- EXYNOS4_SWAP_CHROMA_CRCB;
+ exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_NV12:
reg = reg | EXYNOS4_DEC_YUV_420_IMG |
EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
- EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_NV21:
reg = reg | EXYNOS4_DEC_YUV_420_IMG |
EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
- EXYNOS4_SWAP_CHROMA_CRCB;
+ exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_YUV420:
reg = reg | EXYNOS4_DEC_YUV_420_IMG |
EXYNOS4_YUV_420_IP_YUV_420_3P_IMG |
- EXYNOS4_SWAP_CHROMA_CBCR;
+ exynos4_swap_chroma_cbcr;
break;
default:
break;
@@ -118,12 +129,14 @@ void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt)
writel(reg, base + EXYNOS4_IMG_FMT_REG);
}
-void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt)
+void __exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt,
+ unsigned int version)
{
unsigned int reg;
reg = readl(base + EXYNOS4_IMG_FMT_REG) &
- ~EXYNOS4_ENC_FMT_MASK; /* clear enc format */
+ ~(version == SJPEG_EXYNOS4 ? EXYNOS4_ENC_FMT_MASK :
+ EXYNOS5433_ENC_FMT_MASK); /* clear enc format */
switch (out_fmt) {
case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
@@ -149,9 +162,18 @@ void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt)
writel(reg, base + EXYNOS4_IMG_FMT_REG);
}
-void exynos4_jpeg_set_interrupt(void __iomem *base)
+void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version)
{
- writel(EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+ unsigned int reg;
+
+ if (version == SJPEG_EXYNOS4) {
+ reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
+ writel(reg | EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+ } else {
+ reg = readl(base + EXYNOS4_INT_EN_REG) &
+ ~EXYNOS5433_INT_EN_MASK;
+ writel(reg | EXYNOS5433_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+ }
}
unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
@@ -234,6 +256,36 @@ void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
writel(reg, base + EXYNOS4_TBL_SEL_REG);
}
+void exynos4_jpeg_set_dec_components(void __iomem *base, int n)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_TBL_SEL_REG);
+
+ reg |= EXYNOS4_NF(n);
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_TBL_SEL_REG);
+
+ reg |= EXYNOS4_Q_TBL_COMP(c, x);
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_TBL_SEL_REG);
+
+ reg |= EXYNOS4_HUFF_TBL_COMP(c, x);
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
{
if (fmt == V4L2_PIX_FMT_GREY)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
index c228d28a4bc7..cf6ec055d63a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
@@ -15,10 +15,12 @@
void exynos4_jpeg_sw_reset(void __iomem *base);
void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode);
-void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt);
-void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt);
+void __exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt,
+ unsigned int version);
+void __exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt,
+ unsigned int version);
void exynos4_jpeg_set_enc_tbl(void __iomem *base);
-void exynos4_jpeg_set_interrupt(void __iomem *base);
+void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version);
unsigned int exynos4_jpeg_get_int_status(void __iomem *base);
void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value);
void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value);
@@ -30,6 +32,9 @@ void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
struct s5p_jpeg_addr *jpeg_addr);
void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
enum exynos4_jpeg_img_quality_level level);
+void exynos4_jpeg_set_dec_components(void __iomem *base, int n);
+void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x);
+void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x);
void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt);
void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size);
unsigned int exynos4_jpeg_get_stream_size(void __iomem *base);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index 050fc440248f..1870400468b2 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -231,12 +231,14 @@
/* JPEG INT Register bit */
#define EXYNOS4_INT_EN_MASK (0x1f << 0)
+#define EXYNOS5433_INT_EN_MASK (0x1ff << 0)
#define EXYNOS4_PROT_ERR_INT_EN (1 << 0)
#define EXYNOS4_IMG_COMPLETION_INT_EN (1 << 1)
#define EXYNOS4_DEC_INVALID_FORMAT_EN (1 << 2)
#define EXYNOS4_MULTI_SCAN_ERROR_EN (1 << 3)
#define EXYNOS4_FRAME_ERR_EN (1 << 4)
#define EXYNOS4_INT_EN_ALL (0x1f << 0)
+#define EXYNOS5433_INT_EN_ALL (0x1b6 << 0)
#define EXYNOS4_MOD_REG_PROC_ENC (0 << 3)
#define EXYNOS4_MOD_REG_PROC_DEC (1 << 3)
@@ -296,6 +298,8 @@
#define EXYNOS4_ENC_FMT_SHIFT 24
#define EXYNOS4_ENC_FMT_MASK (3 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS5433_ENC_FMT_MASK (7 << EXYNOS4_ENC_FMT_SHIFT)
+
#define EXYNOS4_ENC_FMT_GRAY (0 << EXYNOS4_ENC_FMT_SHIFT)
#define EXYNOS4_ENC_FMT_YUV_444 (1 << EXYNOS4_ENC_FMT_SHIFT)
#define EXYNOS4_ENC_FMT_YUV_422 (2 << EXYNOS4_ENC_FMT_SHIFT)
@@ -305,6 +309,8 @@
#define EXYNOS4_SWAP_CHROMA_CRCB (1 << 26)
#define EXYNOS4_SWAP_CHROMA_CBCR (0 << 26)
+#define EXYNOS5433_SWAP_CHROMA_CRCB (1 << 27)
+#define EXYNOS5433_SWAP_CHROMA_CBCR (0 << 27)
/* JPEG HUFF count Register bit */
#define EXYNOS4_HUFF_COUNT_MASK 0xffff
@@ -316,35 +322,56 @@
#define EXYNOS4_DECODED_IMG_FMT_MASK 0x3
/* JPEG TBL SEL Register bit */
-#define EXYNOS4_Q_TBL_COMP1_0 (0 << 0)
-#define EXYNOS4_Q_TBL_COMP1_1 (1 << 0)
-#define EXYNOS4_Q_TBL_COMP1_2 (2 << 0)
-#define EXYNOS4_Q_TBL_COMP1_3 (3 << 0)
-
-#define EXYNOS4_Q_TBL_COMP2_0 (0 << 2)
-#define EXYNOS4_Q_TBL_COMP2_1 (1 << 2)
-#define EXYNOS4_Q_TBL_COMP2_2 (2 << 2)
-#define EXYNOS4_Q_TBL_COMP2_3 (3 << 2)
-
-#define EXYNOS4_Q_TBL_COMP3_0 (0 << 4)
-#define EXYNOS4_Q_TBL_COMP3_1 (1 << 4)
-#define EXYNOS4_Q_TBL_COMP3_2 (2 << 4)
-#define EXYNOS4_Q_TBL_COMP3_3 (3 << 4)
-
-#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_0 (0 << 6)
-#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 (1 << 6)
-#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_0 (2 << 6)
-#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_1 (3 << 6)
-
-#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 (0 << 8)
-#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_1 (1 << 8)
-#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_0 (2 << 8)
-#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_1 (3 << 8)
-
-#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_0 (0 << 10)
-#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_1 (1 << 10)
-#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_0 (2 << 10)
-#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1 (3 << 10)
+#define EXYNOS4_Q_TBL_COMP(c, n) ((n) << (((c) - 1) << 1))
+
+#define EXYNOS4_Q_TBL_COMP1_0 EXYNOS4_Q_TBL_COMP(1, 0)
+#define EXYNOS4_Q_TBL_COMP1_1 EXYNOS4_Q_TBL_COMP(1, 1)
+#define EXYNOS4_Q_TBL_COMP1_2 EXYNOS4_Q_TBL_COMP(1, 2)
+#define EXYNOS4_Q_TBL_COMP1_3 EXYNOS4_Q_TBL_COMP(1, 3)
+
+#define EXYNOS4_Q_TBL_COMP2_0 EXYNOS4_Q_TBL_COMP(2, 0)
+#define EXYNOS4_Q_TBL_COMP2_1 EXYNOS4_Q_TBL_COMP(2, 1)
+#define EXYNOS4_Q_TBL_COMP2_2 EXYNOS4_Q_TBL_COMP(2, 2)
+#define EXYNOS4_Q_TBL_COMP2_3 EXYNOS4_Q_TBL_COMP(2, 3)
+
+#define EXYNOS4_Q_TBL_COMP3_0 EXYNOS4_Q_TBL_COMP(3, 0)
+#define EXYNOS4_Q_TBL_COMP3_1 EXYNOS4_Q_TBL_COMP(3, 1)
+#define EXYNOS4_Q_TBL_COMP3_2 EXYNOS4_Q_TBL_COMP(3, 2)
+#define EXYNOS4_Q_TBL_COMP3_3 EXYNOS4_Q_TBL_COMP(3, 3)
+
+#define EXYNOS4_HUFF_TBL_COMP(c, n) ((n) << ((((c) - 1) << 1) + 6))
+
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(1, 0)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(1, 1)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(1, 2)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(1, 3)
+
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(2, 0)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(2, 1)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(2, 2)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(2, 3)
+
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(3, 0)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(3, 1)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_0 \
+ EXYNOS4_HUFF_TBL_COMP(3, 2)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1 \
+ EXYNOS4_HUFF_TBL_COMP(3, 3)
+
+#define EXYNOS4_NF_SHIFT 16
+#define EXYNOS4_NF_MASK 0xff
+#define EXYNOS4_NF(x) \
+ (((x) & EXYNOS4_NF_MASK) << EXYNOS4_NF_SHIFT)
/* JPEG quantizer table register */
#define EXYNOS4_QTBL_CONTENT(n) (0x100 + (n) * 0x40)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 8de61dc1e142..3ffe2ecfd5ef 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -22,7 +22,7 @@
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <linux/of.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
@@ -181,13 +181,6 @@ unlock:
mutex_unlock(&dev->mfc_mutex);
}
-static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
-{
- mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
- mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
- mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
-}
-
static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_buf *dst_buf;
@@ -199,22 +192,23 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
dst_buf = list_entry(ctx->dst_queue.next,
struct s5p_mfc_buf, list);
mfc_debug(2, "Cleaning up buffer: %d\n",
- dst_buf->b->v4l2_buf.index);
- vb2_set_plane_payload(dst_buf->b, 0, 0);
- vb2_set_plane_payload(dst_buf->b, 1, 0);
+ dst_buf->b->vb2_buf.index);
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1, 0);
list_del(&dst_buf->list);
+ dst_buf->flags |= MFC_BUF_FLAG_EOS;
ctx->dst_queue_cnt--;
- dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
+ dst_buf->b->sequence = (ctx->sequence++);
if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
- dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+ dst_buf->b->field = V4L2_FIELD_NONE;
else
- dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
- dst_buf->b->v4l2_buf.flags |= V4L2_BUF_FLAG_LAST;
+ dst_buf->b->field = V4L2_FIELD_INTERLACED;
+ dst_buf->b->flags |= V4L2_BUF_FLAG_LAST;
- ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
- vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
+ ctx->dec_dst_flag &= ~(1 << dst_buf->b->vb2_buf.index);
+ vb2_buffer_done(&dst_buf->b->vb2_buf, VB2_BUF_STATE_DONE);
}
}
@@ -235,27 +229,28 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
appropriate flags. */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
- if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
- dst_buf->b->v4l2_buf.timecode =
- src_buf->b->v4l2_buf.timecode;
- dst_buf->b->v4l2_buf.timestamp =
- src_buf->b->v4l2_buf.timestamp;
- dst_buf->b->v4l2_buf.flags &=
+ if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
+ == dec_y_addr) {
+ dst_buf->b->timecode =
+ src_buf->b->timecode;
+ dst_buf->b->timestamp =
+ src_buf->b->timestamp;
+ dst_buf->b->flags &=
~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->b->v4l2_buf.flags |=
- src_buf->b->v4l2_buf.flags
+ dst_buf->b->flags |=
+ src_buf->b->flags
& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
switch (frame_type) {
case S5P_FIMV_DECODE_FRAME_I_FRAME:
- dst_buf->b->v4l2_buf.flags |=
+ dst_buf->b->flags |=
V4L2_BUF_FLAG_KEYFRAME;
break;
case S5P_FIMV_DECODE_FRAME_P_FRAME:
- dst_buf->b->v4l2_buf.flags |=
+ dst_buf->b->flags |=
V4L2_BUF_FLAG_PFRAME;
break;
case S5P_FIMV_DECODE_FRAME_B_FRAME:
- dst_buf->b->v4l2_buf.flags |=
+ dst_buf->b->flags |=
V4L2_BUF_FLAG_BFRAME;
break;
default:
@@ -296,25 +291,28 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
* check which videobuf does it correspond to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
/* Check if this is the buffer we're looking for */
- if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
+ if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
+ == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
- dst_buf->b->v4l2_buf.sequence = ctx->sequence;
+ dst_buf->b->sequence = ctx->sequence;
if (s5p_mfc_hw_call(dev->mfc_ops,
get_pic_type_top, ctx) ==
s5p_mfc_hw_call(dev->mfc_ops,
get_pic_type_bot, ctx))
- dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+ dst_buf->b->field = V4L2_FIELD_NONE;
else
- dst_buf->b->v4l2_buf.field =
+ dst_buf->b->field =
V4L2_FIELD_INTERLACED;
- vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
- vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
- clear_bit(dst_buf->b->v4l2_buf.index,
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0,
+ ctx->luma_size);
+ vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1,
+ ctx->chroma_size);
+ clear_bit(dst_buf->b->vb2_buf.index,
&ctx->dec_dst_flag);
- vb2_buffer_done(dst_buf->b,
- err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&dst_buf->b->vb2_buf, err ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
break;
}
@@ -395,7 +393,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
ctx->consumed_stream + STUFF_BYTE <
- src_buf->b->v4l2_planes[0].bytesused) {
+ src_buf->b->vb2_buf.planes[0].bytesused) {
/* Run MFC again on the same buffer */
mfc_debug(2, "Running again the same buffer\n");
ctx->after_packed_pb = 1;
@@ -407,9 +405,11 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
list_del(&src_buf->list);
ctx->src_queue_cnt--;
if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
- vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&src_buf->b->vb2_buf,
+ VB2_BUF_STATE_ERROR);
else
- vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&src_buf->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
}
}
leave_handle_frame:
@@ -510,7 +510,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_buf, list);
if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
dev) <
- src_buf->b->v4l2_planes[0].bytesused)
+ src_buf->b->vb2_buf.planes[0].bytesused)
ctx->head_processed = 0;
else
ctx->head_processed = 1;
@@ -551,7 +551,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_buf, list);
list_del(&src_buf->list);
ctx->src_queue_cnt--;
- vb2_buffer_done(src_buf->b,
+ vb2_buffer_done(&src_buf->b->vb2_buf,
VB2_BUF_STATE_DONE);
}
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -573,17 +573,13 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
}
}
-static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
- unsigned int reason, unsigned int err)
+static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *mb_entry;
mfc_debug(2, "Stream completed\n");
- s5p_mfc_clear_int_flags(dev);
- ctx->int_type = reason;
- ctx->int_err = err;
ctx->state = MFCINST_FINISHED;
spin_lock(&dev->irqlock);
@@ -592,8 +588,8 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
list);
list_del(&mb_entry->list);
ctx->dst_queue_cnt--;
- vb2_set_plane_payload(mb_entry->b, 0, 0);
- vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0);
+ vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
}
spin_unlock(&dev->irqlock);
@@ -640,6 +636,13 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
if (ctx->c_ops->post_frame_start) {
if (ctx->c_ops->post_frame_start(ctx))
mfc_err("post_frame_start() failed\n");
+
+ if (ctx->state == MFCINST_FINISHING &&
+ list_empty(&ctx->ref_queue)) {
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_handle_stream_complete(ctx);
+ break;
+ }
s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
@@ -685,7 +688,10 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
break;
case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
- s5p_mfc_handle_stream_complete(ctx, reason, err);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ s5p_mfc_handle_stream_complete(ctx);
break;
case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 24262bbb1a35..d1a3f9b1bc44 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -21,7 +21,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "regs-mfc.h"
#include "regs-mfc-v8.h"
@@ -179,8 +179,8 @@ struct s5p_mfc_ctx;
* struct s5p_mfc_buf - MFC buffer
*/
struct s5p_mfc_buf {
+ struct vb2_v4l2_buffer *b;
struct list_head list;
- struct vb2_buffer *b;
union {
struct {
size_t luma;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index aebe4fd7f03a..8c5060a7534f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -22,7 +22,7 @@
#include <linux/workqueue.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
@@ -645,17 +645,22 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
mfc_err("Call on DQBUF after unrecoverable error\n");
return -EIO;
}
- if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- ret = vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
- else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
+ switch (buf->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
ret = vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
- if (ret == 0 && ctx->state == MFCINST_FINISHED &&
- list_empty(&ctx->vq_dst.done_list))
+ if (ret)
+ return ret;
+
+ if (ctx->state == MFCINST_FINISHED &&
+ (ctx->dst_bufs[buf->index].flags & MFC_BUF_FLAG_EOS))
v4l2_event_queue_fh(&ctx->fh, &ev);
- } else {
- ret = -EINVAL;
+ return 0;
+ default:
+ return -EINVAL;
}
- return ret;
}
/* Export DMA buffer */
@@ -883,7 +888,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
};
static int s5p_mfc_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt, unsigned int *buf_count,
+ const void *parg, unsigned int *buf_count,
unsigned int *plane_count, unsigned int psize[],
void *allocators[])
{
@@ -945,6 +950,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
unsigned int i;
@@ -964,8 +970,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
mfc_err("Plane buffer (CAPTURE) is too small\n");
return -EINVAL;
}
- i = vb->v4l2_buf.index;
- ctx->dst_bufs[i].b = vb;
+ i = vb->index;
+ ctx->dst_bufs[i].b = vbuf;
ctx->dst_bufs[i].cookie.raw.luma =
vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs[i].cookie.raw.chroma =
@@ -982,8 +988,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
return -EINVAL;
}
- i = vb->v4l2_buf.index;
- ctx->src_bufs[i].b = vb;
+ i = vb->index;
+ ctx->src_bufs[i].b = vbuf;
ctx->src_bufs[i].cookie.stream =
vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs_cnt++;
@@ -1065,18 +1071,18 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
struct s5p_mfc_buf *mfc_buf;
if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+ mfc_buf = &ctx->src_bufs[vb->index];
mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
spin_lock_irqsave(&dev->irqlock, flags);
list_add_tail(&mfc_buf->list, &ctx->src_queue);
ctx->src_queue_cnt++;
spin_unlock_irqrestore(&dev->irqlock, flags);
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+ mfc_buf = &ctx->dst_bufs[vb->index];
mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
/* Mark destination as available for use by MFC */
spin_lock_irqsave(&dev->irqlock, flags);
- set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag);
+ set_bit(vb->index, &ctx->dec_dst_flag);
list_add_tail(&mfc_buf->list, &ctx->dst_queue);
ctx->dst_queue_cnt++;
spin_unlock_irqrestore(&dev->irqlock, flags);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2e57e9f45b85..5c678ec9c9f2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -23,7 +23,7 @@
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <media/v4l2-ctrls.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
@@ -773,8 +773,8 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
- dst_size = vb2_plane_size(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -796,10 +796,11 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_buf, list);
list_del(&dst_mb->list);
ctx->dst_queue_cnt--;
- vb2_set_plane_payload(dst_mb->b, 0,
+ vb2_set_plane_payload(&dst_mb->b->vb2_buf, 0,
s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size,
dev));
- vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&dst_mb->b->vb2_buf,
+ VB2_BUF_STATE_DONE);
}
spin_unlock_irqrestore(&dev->irqlock, flags);
}
@@ -831,16 +832,16 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
- src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx,
src_y_addr, src_c_addr);
spin_unlock_irqrestore(&dev->irqlock, flags);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
- dst_size = vb2_plane_size(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -869,25 +870,29 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx,
&enc_y_addr, &enc_c_addr);
list_for_each_entry(mb_entry, &ctx->src_queue, list) {
- mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 1);
if ((enc_y_addr == mb_y_addr) &&
(enc_c_addr == mb_c_addr)) {
list_del(&mb_entry->list);
ctx->src_queue_cnt--;
- vb2_buffer_done(mb_entry->b,
+ vb2_buffer_done(&mb_entry->b->vb2_buf,
VB2_BUF_STATE_DONE);
break;
}
}
list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
- mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
+ mb_y_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 0);
+ mb_c_addr = vb2_dma_contig_plane_dma_addr(
+ &mb_entry->b->vb2_buf, 1);
if ((enc_y_addr == mb_y_addr) &&
(enc_c_addr == mb_c_addr)) {
list_del(&mb_entry->list);
ctx->ref_queue_cnt--;
- vb2_buffer_done(mb_entry->b,
+ vb2_buffer_done(&mb_entry->b->vb2_buf,
VB2_BUF_STATE_DONE);
break;
}
@@ -902,9 +907,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
list_add_tail(&mb_entry->list, &ctx->ref_queue);
ctx->ref_queue_cnt++;
}
- mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
- ctx->src_queue_cnt, ctx->ref_queue_cnt);
}
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
if ((ctx->dst_queue_cnt > 0) && (strm_size > 0)) {
mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
list);
@@ -912,21 +917,22 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
ctx->dst_queue_cnt--;
switch (slice_type) {
case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
- mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ mb_entry->b->flags |= V4L2_BUF_FLAG_KEYFRAME;
break;
case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
- mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ mb_entry->b->flags |= V4L2_BUF_FLAG_PFRAME;
break;
case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
- mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME;
+ mb_entry->b->flags |= V4L2_BUF_FLAG_BFRAME;
break;
}
- vb2_set_plane_payload(mb_entry->b, 0, strm_size);
- vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
+ vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
}
spin_unlock_irqrestore(&dev->irqlock, flags);
if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
clear_work_bit(ctx);
+
return 0;
}
@@ -1806,13 +1812,13 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
return -EINVAL;
}
mfc_debug(2, "index: %d, plane[%d] cookie: %pad\n",
- vb->v4l2_buf.index, i, &dma);
+ vb->index, i, &dma);
}
return 0;
}
static int s5p_mfc_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *buf_count, unsigned int *plane_count,
unsigned int psize[], void *allocators[])
{
@@ -1821,7 +1827,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (ctx->state != MFCINST_GOT_INST) {
- mfc_err("inavlid state: %d\n", ctx->state);
+ mfc_err("invalid state: %d\n", ctx->state);
return -EINVAL;
}
@@ -1861,7 +1867,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
}
} else {
- mfc_err("inavlid queue type: %d\n", vq->type);
+ mfc_err("invalid queue type: %d\n", vq->type);
return -EINVAL;
}
return 0;
@@ -1869,6 +1875,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
unsigned int i;
@@ -1878,8 +1885,8 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
ret = check_vb_with_fmt(ctx->dst_fmt, vb);
if (ret < 0)
return ret;
- i = vb->v4l2_buf.index;
- ctx->dst_bufs[i].b = vb;
+ i = vb->index;
+ ctx->dst_bufs[i].b = vbuf;
ctx->dst_bufs[i].cookie.stream =
vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs_cnt++;
@@ -1887,15 +1894,15 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
ret = check_vb_with_fmt(ctx->src_fmt, vb);
if (ret < 0)
return ret;
- i = vb->v4l2_buf.index;
- ctx->src_bufs[i].b = vb;
+ i = vb->index;
+ ctx->src_bufs[i].b = vbuf;
ctx->src_bufs[i].cookie.raw.luma =
vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs[i].cookie.raw.chroma =
vb2_dma_contig_plane_dma_addr(vb, 1);
ctx->src_bufs_cnt++;
} else {
- mfc_err("inavlid queue type: %d\n", vq->type);
+ mfc_err("invalid queue type: %d\n", vq->type);
return -EINVAL;
}
return 0;
@@ -1931,7 +1938,7 @@ static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
} else {
- mfc_err("inavlid queue type: %d\n", vq->type);
+ mfc_err("invalid queue type: %d\n", vq->type);
return -EINVAL;
}
return 0;
@@ -2012,7 +2019,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
return;
}
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+ mfc_buf = &ctx->dst_bufs[vb->index];
mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
/* Mark destination as available for use by MFC */
spin_lock_irqsave(&dev->irqlock, flags);
@@ -2020,7 +2027,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
ctx->dst_queue_cnt++;
spin_unlock_irqrestore(&dev->irqlock, flags);
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+ mfc_buf = &ctx->src_bufs[vb->index];
mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
spin_lock_irqsave(&dev->irqlock, flags);
list_add_tail(&mfc_buf->list, &ctx->src_queue);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 6402f76cc620..873c933bc7d4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1208,11 +1208,11 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
temp_vb->flags |= MFC_BUF_FLAG_USED;
s5p_mfc_set_dec_stream_buffer_v5(ctx,
- vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
- ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+ if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
last_frame = MFC_DEC_LAST_FRAME;
mfc_debug(2, "Setting ctx->state to FINISHING\n");
ctx->state = MFCINST_FINISHING;
@@ -1249,16 +1249,16 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
list);
src_mb->flags |= MFC_BUF_FLAG_USED;
- if (src_mb->b->v4l2_planes[0].bytesused == 0) {
+ if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
/* send null frame */
s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2,
dev->bank2);
ctx->state = MFCINST_FINISHING;
} else {
- src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
- 0);
- src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
- 1);
+ src_y_addr = vb2_dma_contig_plane_dma_addr(
+ &src_mb->b->vb2_buf, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(
+ &src_mb->b->vb2_buf, 1);
s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
src_c_addr);
if (src_mb->flags & MFC_BUF_FLAG_EOS)
@@ -1267,13 +1267,13 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
}
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_mb->flags |= MFC_BUF_FLAG_USED;
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
- dst_size = vb2_plane_size(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
mfc_debug(2, "encoding buffer with index=%d state=%d\n",
- src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
+ src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state);
s5p_mfc_encode_one_frame_v5(ctx);
return 0;
}
@@ -1289,10 +1289,11 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
mfc_debug(2, "Preparing to init decoding\n");
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
s5p_mfc_set_dec_desc_buffer(ctx);
- mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ mfc_debug(2, "Header size: %d\n",
+ temp_vb->b->vb2_buf.planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer_v5(ctx,
- vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
- 0, temp_vb->b->v4l2_planes[0].bytesused);
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ 0, temp_vb->b->vb2_buf.planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
s5p_mfc_init_decode_v5(ctx);
@@ -1309,8 +1310,8 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_ref_buffer_v5(ctx);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
- dst_size = vb2_plane_size(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
@@ -1342,10 +1343,11 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
return -EIO;
}
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ mfc_debug(2, "Header size: %d\n",
+ temp_vb->b->vb2_buf.planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer_v5(ctx,
- vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
- 0, temp_vb->b->v4l2_planes[0].bytesused);
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+ 0, temp_vb->b->vb2_buf.planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
@@ -1478,9 +1480,9 @@ static void s5p_mfc_cleanup_queue_v5(struct list_head *lh, struct vb2_queue *vq)
while (!list_empty(lh)) {
b = list_entry(lh->next, struct s5p_mfc_buf, list);
- for (i = 0; i < b->b->num_planes; i++)
- vb2_set_plane_payload(b->b, i, 0);
- vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+ for (i = 0; i < b->b->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
+ vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&b->list);
}
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index e5cb30e1f718..b95845347348 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -522,7 +522,7 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
writel(addr, mfc_regs->e_stream_buffer_addr); /* 16B align */
writel(size, mfc_regs->e_stream_buffer_size);
- mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
+ mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%x\n",
addr, size);
return 0;
@@ -554,7 +554,7 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
enc_recon_y_addr = readl(mfc_regs->e_recon_luma_dpb_addr);
enc_recon_c_addr = readl(mfc_regs->e_recon_chroma_dpb_addr);
- mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
+ mfc_debug(2, "recon y addr: 0x%08lx y_addr: 0x%08lx\n", enc_recon_y_addr, *y_addr);
mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
}
@@ -1483,6 +1483,7 @@ static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
+ int cmd;
mfc_debug(2, "++\n");
@@ -1493,9 +1494,13 @@ static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_slice_mode(ctx);
+ if (ctx->state != MFCINST_FINISHING)
+ cmd = S5P_FIMV_CH_FRAME_START_V6;
+ else
+ cmd = S5P_FIMV_CH_LAST_FRAME_V6;
+
writel(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
- S5P_FIMV_CH_FRAME_START_V6, NULL);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, cmd, NULL);
mfc_debug(2, "--\n");
@@ -1562,13 +1567,13 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
temp_vb->flags |= MFC_BUF_FLAG_USED;
s5p_mfc_set_dec_stream_buffer_v6(ctx,
- vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
ctx->consumed_stream,
- temp_vb->b->v4l2_planes[0].bytesused);
+ temp_vb->b->vb2_buf.planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+ if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
last_frame = 1;
mfc_debug(2, "Setting ctx->state to FINISHING\n");
ctx->state = MFCINST_FINISHING;
@@ -1592,7 +1597,7 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
- if (list_empty(&ctx->src_queue)) {
+ if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
mfc_debug(2, "no src buffers.\n");
spin_unlock_irqrestore(&dev->irqlock, flags);
return -EAGAIN;
@@ -1604,20 +1609,33 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
return -EAGAIN;
}
- src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- src_mb->flags |= MFC_BUF_FLAG_USED;
- src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
- src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
+ if (list_empty(&ctx->src_queue)) {
+ /* send null frame */
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0);
+ src_mb = NULL;
+ } else {
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_mb->flags |= MFC_BUF_FLAG_USED;
+ if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0);
+ ctx->state = MFCINST_FINISHING;
+ } else {
+ src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
+ src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
- mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
- mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
+ mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
+ mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
- s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
+ if (src_mb->flags & MFC_BUF_FLAG_EOS)
+ ctx->state = MFCINST_FINISHING;
+ }
+ }
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_mb->flags |= MFC_BUF_FLAG_USED;
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
- dst_size = vb2_plane_size(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
@@ -1639,10 +1657,10 @@ static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
mfc_debug(2, "Preparing to init decoding.\n");
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ mfc_debug(2, "Header size: %d\n", temp_vb->b->vb2_buf.planes[0].bytesused);
s5p_mfc_set_dec_stream_buffer_v6(ctx,
- vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0,
- temp_vb->b->v4l2_planes[0].bytesused);
+ vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0,
+ temp_vb->b->vb2_buf.planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
s5p_mfc_init_decode_v6(ctx);
@@ -1659,8 +1677,8 @@ static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
- dst_size = vb2_plane_size(dst_mb->b, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
@@ -1836,9 +1854,9 @@ static void s5p_mfc_cleanup_queue_v6(struct list_head *lh, struct vb2_queue *vq)
while (!list_empty(lh)) {
b = list_entry(lh->next, struct s5p_mfc_buf, list);
- for (i = 0; i < b->b->num_planes; i++)
- vb2_set_plane_payload(b->b, i, 0);
- vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+ for (i = 0; i < b->b->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
+ vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&b->list);
}
}
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index fb2acc53112a..42cd2709c41c 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -24,7 +24,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <media/v4l2-device.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "regs-mixer.h"
@@ -113,7 +113,7 @@ struct mxr_geometry {
/** instance of a buffer */
struct mxr_buffer {
/** common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
/** node for layer's lists */
struct list_head list;
};
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index 74344c764daa..db3163b23ea0 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -86,7 +86,7 @@ static void mxr_graph_buffer_set(struct mxr_layer *layer,
dma_addr_t addr = 0;
if (buf)
- addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
}
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
index 5127acb1e571..a0ec14a1da13 100644
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -279,7 +279,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
layer->ops.buffer_set(layer, layer->update_buf);
if (done && done != layer->shadow_buf)
- vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
done:
spin_unlock(&layer->enq_slock);
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 751f3b618337..dc1c679e136c 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -881,7 +881,7 @@ static const struct v4l2_file_operations mxr_fops = {
.unlocked_ioctl = video_ioctl2,
};
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
void *alloc_ctxs[])
{
@@ -914,7 +914,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
static void buf_queue(struct vb2_buffer *vb)
{
- struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mxr_buffer *buffer = container_of(vbuf, struct mxr_buffer, vb);
struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
struct mxr_device *mdev = layer->mdev;
unsigned long flags;
@@ -963,11 +964,13 @@ static void mxr_watchdog(unsigned long arg)
if (layer->update_buf == layer->shadow_buf)
layer->update_buf = NULL;
if (layer->update_buf) {
- vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&layer->update_buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
layer->update_buf = NULL;
}
if (layer->shadow_buf) {
- vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&layer->shadow_buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
layer->shadow_buf = NULL;
}
spin_unlock_irqrestore(&layer->enq_slock, flags);
@@ -991,7 +994,7 @@ static void stop_streaming(struct vb2_queue *vq)
/* set all buffer to be done */
list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&layer->enq_slock, flags);
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index c9388c45ad75..dd002a497dbb 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -97,9 +97,10 @@ static void mxr_vp_buffer_set(struct mxr_layer *layer,
mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
return;
}
- luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
+ luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
if (layer->fmt->num_subframes == 2) {
- chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1);
+ chroma_addr[0] =
+ vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1);
} else {
/* FIXME: mxr_get_plane_size compute integer division,
* which is slow and should not be performed in interrupt */
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index f5e3eb3a20ff..d6ab33e7060a 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -865,10 +865,11 @@ static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
/* ========== Queue operations ========== */
static int sh_veu_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *f,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *f = parg;
struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
struct sh_veu_vfmt *vfmt;
unsigned int size, count = *nbuffers;
@@ -931,9 +932,10 @@ static int sh_veu_buf_prepare(struct vb2_buffer *vb)
static void sh_veu_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
- dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type);
- v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
+ dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
+ v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
}
static const struct vb2_ops sh_veu_qops = {
@@ -1084,8 +1086,8 @@ static irqreturn_t sh_veu_bh(int irq, void *dev_id)
static irqreturn_t sh_veu_isr(int irq, void *dev_id)
{
struct sh_veu_dev *veu = dev_id;
- struct vb2_buffer *dst;
- struct vb2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+ struct vb2_v4l2_buffer *src;
u32 status = sh_veu_reg_read(veu, VEU_EVTR);
/* bundle read mode not used */
@@ -1105,11 +1107,11 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
if (!src || !dst)
return IRQ_NONE;
- dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
- dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst->v4l2_buf.flags |=
- src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
+ dst->timestamp = src->timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |=
+ src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->timecode = src->timecode;
spin_lock(&veu->lock);
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index fe5c8ab06bd5..2231f8922df3 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -27,6 +27,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
/* Mirror addresses are not available for all registers */
@@ -62,11 +63,12 @@ enum sh_vou_status {
#define VOU_MIN_IMAGE_HEIGHT 16
struct sh_vou_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
-static inline struct sh_vou_buffer *to_sh_vou_buffer(struct vb2_buffer *vb2)
+static inline struct sh_vou_buffer *
+to_sh_vou_buffer(struct vb2_v4l2_buffer *vb2)
{
return container_of(vb2, struct sh_vou_buffer, vb);
}
@@ -193,11 +195,11 @@ static struct sh_vou_fmt vou_fmt[] = {
};
static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
- struct vb2_buffer *vb)
+ struct vb2_v4l2_buffer *vbuf)
{
dma_addr_t addr1, addr2;
- addr1 = vb2_dma_contig_plane_dma_addr(vb, 0);
+ addr1 = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
switch (vou_dev->pix.pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
@@ -241,10 +243,11 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
}
/* Locking: caller holds fop_lock mutex */
-static int sh_vou_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int sh_vou_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
struct v4l2_pix_format *pix = &vou_dev->pix;
int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
@@ -282,8 +285,9 @@ static int sh_vou_buf_prepare(struct vb2_buffer *vb)
/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
static void sh_vou_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
- struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vb);
+ struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vbuf);
unsigned long flags;
spin_lock_irqsave(&vou_dev->lock, flags);
@@ -302,7 +306,8 @@ static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
video, s_stream, 1);
if (ret < 0 && ret != -ENOIOCTLCMD) {
list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
list_del(&buf->list);
}
vou_dev->active = NULL;
@@ -353,7 +358,7 @@ static void sh_vou_stop_streaming(struct vb2_queue *vq)
msleep(50);
spin_lock_irqsave(&vou_dev->lock, flags);
list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&buf->list);
}
vou_dev->active = NULL;
@@ -1066,10 +1071,10 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
list_del(&vb->list);
- v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp);
- vb->vb.v4l2_buf.sequence = vou_dev->sequence++;
- vb->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- vb2_buffer_done(&vb->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vb->vb.timestamp);
+ vb->vb.sequence = vou_dev->sequence++;
+ vb->vb.field = V4L2_FIELD_INTERLACED;
+ vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
vou_dev->active = list_entry(vou_dev->buf_list.next,
struct sh_vou_buffer, list);
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 90701726a06a..454f68f0cdad 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -23,12 +23,13 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <media/atmel-isi.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-of.h>
#include <media/videobuf2-dma-contig.h>
+#include "atmel-isi.h"
+
#define MAX_BUFFER_NUM 32
#define MAX_SUPPORT_WIDTH 2048
#define MAX_SUPPORT_HEIGHT 2048
@@ -59,7 +60,7 @@ struct isi_dma_desc {
/* Frame buffer data */
struct frame_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct isi_dma_desc *p_dma_desc;
struct list_head list;
};
@@ -102,62 +103,71 @@ static u32 isi_readl(struct atmel_isi *isi, u32 reg)
return readl(isi->regs + reg);
}
-static int configure_geometry(struct atmel_isi *isi, u32 width,
+static void configure_geometry(struct atmel_isi *isi, u32 width,
u32 height, u32 code)
{
- u32 cfg2, cr;
+ u32 cfg2;
+ /* Set cfg2 according to the sensor's output format */
switch (code) {
- /* YUV, including grey */
+ default:
+ /* Grey */
case MEDIA_BUS_FMT_Y8_1X8:
- cr = ISI_CFG2_GRAYSCALE;
+ cfg2 = ISI_CFG2_GRAYSCALE | ISI_CFG2_COL_SPACE_YCbCr;
break;
+ /* YUV */
case MEDIA_BUS_FMT_VYUY8_2X8:
- cr = ISI_CFG2_YCC_SWAP_MODE_3;
+ cfg2 = ISI_CFG2_YCC_SWAP_MODE_3 | ISI_CFG2_COL_SPACE_YCbCr;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
- cr = ISI_CFG2_YCC_SWAP_MODE_2;
+ cfg2 = ISI_CFG2_YCC_SWAP_MODE_2 | ISI_CFG2_COL_SPACE_YCbCr;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
- cr = ISI_CFG2_YCC_SWAP_MODE_1;
+ cfg2 = ISI_CFG2_YCC_SWAP_MODE_1 | ISI_CFG2_COL_SPACE_YCbCr;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
- cr = ISI_CFG2_YCC_SWAP_DEFAULT;
+ cfg2 = ISI_CFG2_YCC_SWAP_DEFAULT | ISI_CFG2_COL_SPACE_YCbCr;
break;
/* RGB, TODO */
- default:
- return -EINVAL;
}
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
-
- cfg2 = isi_readl(isi, ISI_CFG2);
- /* Set YCC swap mode */
- cfg2 &= ~ISI_CFG2_YCC_SWAP_MODE_MASK;
- cfg2 |= cr;
/* Set width */
- cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
ISI_CFG2_IM_HSIZE_MASK;
/* Set height */
- cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK);
cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
& ISI_CFG2_IM_VSIZE_MASK;
isi_writel(isi, ISI_CFG2, cfg2);
+}
- return 0;
+static bool is_supported(struct soc_camera_device *icd,
+ const u32 pixformat)
+{
+ switch (pixformat) {
+ /* YUV, including grey */
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ return true;
+ /* RGB, TODO */
+ default:
+ return false;
+ }
}
static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
{
if (isi->active) {
- struct vb2_buffer *vb = &isi->active->vb;
+ struct vb2_v4l2_buffer *vbuf = &isi->active->vb;
struct frame_buffer *buf = isi->active;
list_del_init(&buf->list);
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
- vb->v4l2_buf.sequence = isi->sequence++;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vbuf->timestamp);
+ vbuf->sequence = isi->sequence++;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}
if (list_empty(&isi->video_buffer_list)) {
@@ -225,7 +235,7 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
}
timeout = wait_for_completion_timeout(&isi->complete,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
if (timeout == 0)
return -ETIMEDOUT;
@@ -235,7 +245,7 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -267,7 +277,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int buffer_init(struct vb2_buffer *vb)
{
- struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
buf->p_dma_desc = NULL;
INIT_LIST_HEAD(&buf->list);
@@ -277,8 +288,9 @@ static int buffer_init(struct vb2_buffer *vb)
static int buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
unsigned long size;
@@ -292,7 +304,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(vb, 0, size);
if (!buf->p_dma_desc) {
if (list_empty(&isi->dma_desc_head)) {
@@ -319,10 +331,11 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_cleanup(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
- struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
/* This descriptor is available again, so add it back to the head list */
if (buf->p_dma_desc)
@@ -360,10 +373,11 @@ static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
- struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
unsigned long flags = 0;
spin_lock_irqsave(&isi->lock, flags);
@@ -396,6 +410,9 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* Disable all interrupts */
isi_writel(isi, ISI_INTDIS, (u32)~0UL);
+ configure_geometry(isi, icd->user_width, icd->user_height,
+ icd->current_fmt->code);
+
spin_lock_irq(&isi->lock);
/* Clear any pending interrupt */
isi_readl(isi, ISI_STATUS);
@@ -422,7 +439,7 @@ static void stop_streaming(struct vb2_queue *vq)
/* Release all active buffers */
list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
list_del_init(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irq(&isi->lock);
@@ -483,8 +500,6 @@ static int isi_camera_init_videobuf(struct vb2_queue *q,
static int isi_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -494,6 +509,10 @@ static int isi_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_mbus_framefmt *mf = &format.format;
int ret;
+ /* Check whether the atmel-isi supports this format; if not, fall back to YUYV */
+ if (!is_supported(icd, pix->pixelformat))
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
dev_warn(icd->parent, "Format %x not found\n",
@@ -517,16 +536,6 @@ static int isi_camera_set_fmt(struct soc_camera_device *icd,
if (mf->code != xlate->code)
return -EINVAL;
- /* Enable PM and peripheral clock before operate isi registers */
- pm_runtime_get_sync(ici->v4l2_dev.dev);
-
- ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
-
- pm_runtime_put(ici->v4l2_dev.dev);
-
- if (ret < 0)
- return ret;
-
pix->width = mf->width;
pix->height = mf->height;
pix->field = mf->field;
@@ -553,6 +562,10 @@ static int isi_camera_try_fmt(struct soc_camera_device *icd,
u32 pixfmt = pix->pixelformat;
int ret;
+ /* Check whether the atmel-isi supports this format; if not, fall back to YUYV */
+ if (!is_supported(icd, pix->pixelformat))
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (pixfmt && !xlate) {
dev_warn(icd->parent, "Format %x not found\n", pixfmt);
@@ -824,6 +837,11 @@ static int isi_camera_set_bus_param(struct soc_camera_device *icd)
if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+ dev_dbg(icd->parent, "vsync active %s, hsync active %s, sampling on pix clock %s edge\n",
+ common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? "low" : "high",
+ common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? "low" : "high",
+ common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING ? "falling" : "rising");
+
if (isi->pdata.has_emb_sync)
cfg1 |= ISI_CFG1_EMB_SYNC;
if (isi->pdata.full_mode)
@@ -873,7 +891,7 @@ static int atmel_isi_remove(struct platform_device *pdev)
return 0;
}
-static int atmel_isi_probe_dt(struct atmel_isi *isi,
+static int atmel_isi_parse_dt(struct atmel_isi *isi,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -891,9 +909,10 @@ static int atmel_isi_probe_dt(struct atmel_isi *isi,
}
err = v4l2_of_parse_endpoint(np, &ep);
+ of_node_put(np);
if (err) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
- goto err_probe_dt;
+ return err;
}
switch (ep.bus.parallel.bus_width) {
@@ -907,14 +926,20 @@ static int atmel_isi_probe_dt(struct atmel_isi *isi,
default:
dev_err(&pdev->dev, "Unsupported bus width: %d\n",
ep.bus.parallel.bus_width);
- err = -EINVAL;
- goto err_probe_dt;
+ return -EINVAL;
}
-err_probe_dt:
- of_node_put(np);
+ if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ isi->pdata.hsync_act_low = true;
+ if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ isi->pdata.vsync_act_low = true;
+ if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ isi->pdata.pclk_act_falling = true;
- return err;
+ if (ep.bus_type == V4L2_MBUS_BT656)
+ isi->pdata.has_emb_sync = true;
+
+ return 0;
}
static int atmel_isi_probe(struct platform_device *pdev)
@@ -923,16 +948,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
struct atmel_isi *isi;
struct resource *regs;
int ret, i;
- struct device *dev = &pdev->dev;
struct soc_camera_host *soc_host;
- struct isi_platform_data *pdata;
-
- pdata = dev->platform_data;
- if ((!pdata || !pdata->data_width_flags) && !pdev->dev.of_node) {
- dev_err(&pdev->dev,
- "No config available for Atmel ISI\n");
- return -EINVAL;
- }
isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
if (!isi) {
@@ -944,13 +960,9 @@ static int atmel_isi_probe(struct platform_device *pdev)
if (IS_ERR(isi->pclk))
return PTR_ERR(isi->pclk);
- if (pdata) {
- memcpy(&isi->pdata, pdata, sizeof(isi->pdata));
- } else {
- ret = atmel_isi_probe_dt(isi, pdev);
- if (ret)
- return ret;
- }
+ ret = atmel_isi_parse_dt(isi, pdev);
+ if (ret)
+ return ret;
isi->active = NULL;
spin_lock_init(&isi->lock);
@@ -1014,11 +1026,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
- if (isi->pdata.asd_sizes) {
- soc_host->asd = isi->pdata.asd;
- soc_host->asd_sizes = isi->pdata.asd_sizes;
- }
-
ret = soc_camera_host_register(soc_host);
if (ret) {
dev_err(&pdev->dev, "Unable to register soc camera host\n");
@@ -1040,6 +1047,7 @@ err_alloc_ctx:
return ret;
}
+#ifdef CONFIG_PM
static int atmel_isi_runtime_suspend(struct device *dev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(dev);
@@ -1058,6 +1066,7 @@ static int atmel_isi_runtime_resume(struct device *dev)
return clk_prepare_enable(isi->pclk);
}
+#endif /* CONFIG_PM */
static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
diff --git a/include/media/atmel-isi.h b/drivers/media/platform/soc_camera/atmel-isi.h
index 6008b0985b7b..5acc771d2edc 100644
--- a/include/media/atmel-isi.h
+++ b/drivers/media/platform/soc_camera/atmel-isi.h
@@ -66,6 +66,8 @@
/* Bitfields in CFG2 */
#define ISI_CFG2_GRAYSCALE (1 << 13)
+#define ISI_CFG2_COL_SPACE_YCbCr (0 << 15)
+#define ISI_CFG2_COL_SPACE_RGB (1 << 15)
/* Constants for YCC_SWAP(ISI_V2) */
#define ISI_CFG2_YCC_SWAP_DEFAULT (0 << 28)
#define ISI_CFG2_YCC_SWAP_MODE_1 (1 << 28)
@@ -114,7 +116,6 @@ struct v4l2_async_subdev;
struct isi_platform_data {
u8 has_emb_sync;
- u8 emb_crc_sync;
u8 hsync_act_low;
u8 vsync_act_low;
u8 pclk_act_falling;
@@ -122,10 +123,6 @@ struct isi_platform_data {
u32 data_width_flags;
/* Used for ISI_CFG1 */
u32 frate;
- /* Using for ISI_MCK */
- u32 mck_hz;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
};
#endif /* __ATMEL_ISI_H__ */
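Every .queue_setup callback touched by this series changes the same way: the typed const struct v4l2_format * parameter becomes an opaque const void *parg which the driver casts back itself. A hedged sketch of the converted callback with a hypothetical foo_dev; the size check and fields are placeholders, not taken from any driver here:

static int foo_queue_setup(struct vb2_queue *vq, const void *parg,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	/* NULL unless VIDIOC_CREATE_BUFS supplied a format */
	const struct v4l2_format *fmt = parg;
	struct foo_dev *dev = vb2_get_drv_priv(vq);	/* hypothetical driver state */

	if (fmt && fmt->fmt.pix.sizeimage < dev->sizeimage)
		return -EINVAL;

	*nplanes = 1;
	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : dev->sizeimage;
	alloc_ctxs[0] = dev->alloc_ctx;
	return 0;
}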
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ea4c423f0cf8..1f28d21a3c9a 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -32,7 +32,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
@@ -225,7 +225,7 @@ struct mx2_buf_internal {
/* buffer for one video frame */
struct mx2_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct mx2_buf_internal internal;
};
@@ -469,10 +469,11 @@ static void mx2_camera_clock_stop(struct soc_camera_host *ici)
* Videobuf operations
*/
static int mx2_videobuf_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *count, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
@@ -530,11 +531,12 @@ out:
static void mx2_videobuf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici =
to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
- struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
+ struct mx2_buffer *buf = container_of(vbuf, struct mx2_buffer, vb);
unsigned long flags;
dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
@@ -664,7 +666,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
internal.queue);
buf->internal.bufnum = 0;
- vb = &buf->vb;
+ vb = &buf->vb.vb2_buf;
phys = vb2_dma_contig_plane_dma_addr(vb, 0);
mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
@@ -673,7 +675,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
internal.queue);
buf->internal.bufnum = 1;
- vb = &buf->vb;
+ vb = &buf->vb.vb2_buf;
phys = vb2_dma_contig_plane_dma_addr(vb, 0);
mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
@@ -1307,6 +1309,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
struct mx2_buf_internal *ibuf;
struct mx2_buffer *buf;
struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
unsigned long phys;
ibuf = list_first_entry(&pcdev->active_bufs, struct mx2_buf_internal,
@@ -1323,7 +1326,8 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
} else {
buf = mx2_ibuf_to_buf(ibuf);
- vb = &buf->vb;
+ vb = &buf->vb.vb2_buf;
+ vbuf = to_vb2_v4l2_buffer(vb);
#ifdef DEBUG
phys = vb2_dma_contig_plane_dma_addr(vb, 0);
if (prp->cfg.channel == 1) {
@@ -1347,8 +1351,8 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
vb2_get_plane_payload(vb, 0));
list_del_init(&buf->internal.queue);
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
- vb->v4l2_buf.sequence = pcdev->frame_count;
+ v4l2_get_timestamp(&vbuf->timestamp);
+ vbuf->sequence = pcdev->frame_count;
if (err)
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
else
@@ -1380,7 +1384,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
- vb = &buf->vb;
+ vb = &buf->vb.vb2_buf;
phys = vb2_dma_contig_plane_dma_addr(vb, 0);
mx27_update_emma_buf(pcdev, phys, bufnum);
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index ace41f53caca..49c3a257a916 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -63,7 +63,7 @@
struct mx3_camera_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head queue;
/* One descriptor per scatterlist (per frame) */
@@ -133,7 +133,7 @@ static void csi_reg_write(struct mx3_camera_dev *mx3, u32 value, off_t reg)
__raw_writel(value, mx3->base + reg);
}
-static struct mx3_camera_buffer *to_mx3_vb(struct vb2_buffer *vb)
+static struct mx3_camera_buffer *to_mx3_vb(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct mx3_camera_buffer, vb);
}
@@ -151,14 +151,14 @@ static void mx3_cam_dma_done(void *arg)
spin_lock(&mx3_cam->lock);
if (mx3_cam->active) {
- struct vb2_buffer *vb = &mx3_cam->active->vb;
+ struct vb2_v4l2_buffer *vb = &mx3_cam->active->vb;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
list_del_init(&buf->queue);
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
- vb->v4l2_buf.field = mx3_cam->field;
- vb->v4l2_buf.sequence = mx3_cam->sequence++;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&vb->timestamp);
+ vb->field = mx3_cam->field;
+ vb->sequence = mx3_cam->sequence++;
+ vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
}
if (list_empty(&mx3_cam->capture)) {
@@ -185,10 +185,11 @@ static void mx3_cam_dma_done(void *arg)
* Calculate the __buffer__ (not data) size and number of buffers.
*/
static int mx3_videobuf_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *count, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
@@ -257,10 +258,11 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
static void mx3_videobuf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+ struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
struct scatterlist *sg = &buf->sg;
struct dma_async_tx_descriptor *txd;
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
@@ -273,7 +275,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
if (vb2_plane_size(vb, 0) < new_size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
- vb->v4l2_buf.index, vb2_plane_size(vb, 0), new_size);
+ vbuf->vb2_buf.index, vb2_plane_size(vb, 0), new_size);
goto error;
}
@@ -357,10 +359,11 @@ error:
static void mx3_videobuf_release(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+ struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
struct dma_async_tx_descriptor *txd = buf->txd;
unsigned long flags;
@@ -390,10 +393,11 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)
static int mx3_videobuf_init(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+ struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
if (!buf->txd) {
/* This is for locking debugging only */
@@ -424,7 +428,7 @@ static void mx3_stop_streaming(struct vb2_queue *q)
list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) {
list_del_init(&buf->queue);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&mx3_cam->lock, flags);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 71dd71c0bd1f..efe57b23fac1 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -478,7 +478,7 @@ struct rcar_vin_priv {
struct soc_camera_host ici;
struct list_head capture;
#define MAX_BUFFER_NUM 3
- struct vb2_buffer *queue_buf[MAX_BUFFER_NUM];
+ struct vb2_v4l2_buffer *queue_buf[MAX_BUFFER_NUM];
struct vb2_alloc_ctx *alloc_ctx;
enum v4l2_field field;
unsigned int pdata_flags;
@@ -492,7 +492,7 @@ struct rcar_vin_priv {
#define is_continuous_transfer(priv) (priv->vb_count > MAX_BUFFER_NUM)
struct rcar_vin_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -527,11 +527,12 @@ struct rcar_vin_cam {
* required
*/
static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *count,
unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
@@ -748,7 +749,7 @@ static int rcar_vin_hw_ready(struct rcar_vin_priv *priv)
/* Moves a buffer from the queue to the HW slots */
static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
{
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
dma_addr_t phys_addr_top;
int slot;
@@ -760,10 +761,11 @@ static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
if (slot < 0)
return 0;
- vb = &list_entry(priv->capture.next, struct rcar_vin_buffer, list)->vb;
- list_del_init(to_buf_list(vb));
- priv->queue_buf[slot] = vb;
- phys_addr_top = vb2_dma_contig_plane_dma_addr(vb, 0);
+ vbuf = &list_entry(priv->capture.next,
+ struct rcar_vin_buffer, list)->vb;
+ list_del_init(to_buf_list(vbuf));
+ priv->queue_buf[slot] = vbuf;
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));
return 1;
@@ -771,6 +773,7 @@ static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
@@ -780,7 +783,7 @@ static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
if (vb2_plane_size(vb, 0) < size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
- vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+ vb->index, vb2_plane_size(vb, 0), size);
goto error;
}
@@ -791,14 +794,14 @@ static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
spin_lock_irq(&priv->lock);
- list_add_tail(to_buf_list(vb), &priv->capture);
+ list_add_tail(to_buf_list(vbuf), &priv->capture);
rcar_vin_fill_hw_slot(priv);
/* If we weren't running, and have enough buffers, start capturing! */
if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
if (rcar_vin_setup(priv)) {
/* Submit error */
- list_del_init(to_buf_list(vb));
+ list_del_init(to_buf_list(vbuf));
spin_unlock_irq(&priv->lock);
goto error;
}
@@ -854,7 +857,7 @@ static void rcar_vin_stop_streaming(struct vb2_queue *vq)
for (i = 0; i < MAX_BUFFER_NUM; i++) {
if (priv->queue_buf[i]) {
- vb2_buffer_done(priv->queue_buf[i],
+ vb2_buffer_done(&priv->queue_buf[i]->vb2_buf,
VB2_BUF_STATE_ERROR);
priv->queue_buf[i] = NULL;
}
@@ -862,7 +865,7 @@ static void rcar_vin_stop_streaming(struct vb2_queue *vq)
list_for_each_safe(buf_head, tmp, &priv->capture) {
vb2_buffer_done(&list_entry(buf_head,
- struct rcar_vin_buffer, list)->vb,
+ struct rcar_vin_buffer, list)->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
list_del_init(buf_head);
}
@@ -907,10 +910,11 @@ static irqreturn_t rcar_vin_irq(int irq, void *data)
else
slot = 0;
- priv->queue_buf[slot]->v4l2_buf.field = priv->field;
- priv->queue_buf[slot]->v4l2_buf.sequence = priv->sequence++;
- v4l2_get_timestamp(&priv->queue_buf[slot]->v4l2_buf.timestamp);
- vb2_buffer_done(priv->queue_buf[slot], VB2_BUF_STATE_DONE);
+ priv->queue_buf[slot]->field = priv->field;
+ priv->queue_buf[slot]->sequence = priv->sequence++;
+ v4l2_get_timestamp(&priv->queue_buf[slot]->timestamp);
+ vb2_buffer_done(&priv->queue_buf[slot]->vb2_buf,
+ VB2_BUF_STATE_DONE);
priv->queue_buf[slot] = NULL;
if (priv->state != STOPPING)
@@ -964,7 +968,7 @@ static void rcar_vin_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
int i;
/* disable capture, disable interrupts */
@@ -978,10 +982,10 @@ static void rcar_vin_remove_device(struct soc_camera_device *icd)
/* make sure active buffer is cancelled */
spin_lock_irq(&priv->lock);
for (i = 0; i < MAX_BUFFER_NUM; i++) {
- vb = priv->queue_buf[i];
- if (vb) {
- list_del_init(to_buf_list(vb));
- vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ vbuf = priv->queue_buf[i];
+ if (vbuf) {
+ list_del_init(to_buf_list(vbuf));
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
}
}
spin_unlock_irq(&priv->lock);
@@ -1602,11 +1606,15 @@ static int rcar_vin_set_fmt(struct soc_camera_device *icd,
case V4L2_FIELD_INTERLACED:
/* Query for standard if not explicitly mentioned _TB/_BT */
ret = v4l2_subdev_call(sd, video, querystd, &std);
- if (ret < 0)
- std = V4L2_STD_625_50;
-
- field = std & V4L2_STD_625_50 ? V4L2_FIELD_INTERLACED_TB :
- V4L2_FIELD_INTERLACED_BT;
+ if (ret == -ENOIOCTLCMD) {
+ field = V4L2_FIELD_NONE;
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ field = std & V4L2_STD_625_50 ?
+ V4L2_FIELD_INTERLACED_TB :
+ V4L2_FIELD_INTERLACED_BT;
+ }
break;
}
@@ -1846,8 +1854,6 @@ MODULE_DEVICE_TABLE(of, rcar_vin_of_table);
#endif
static struct platform_device_id rcar_vin_id_table[] = {
- { "r8a7791-vin", RCAR_GEN2 },
- { "r8a7790-vin", RCAR_GEN2 },
{ "r8a7779-vin", RCAR_H1 },
{ "r8a7778-vin", RCAR_M1 },
{ "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index efdeea4490e8..67a669d826b8 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -93,7 +93,7 @@
/* per video frame buffer */
struct sh_mobile_ceu_buffer {
- struct vb2_buffer vb; /* v4l buffer must be first */
+ struct vb2_v4l2_buffer vb; /* v4l buffer must be first */
struct list_head queue;
};
@@ -112,7 +112,7 @@ struct sh_mobile_ceu_dev {
spinlock_t lock; /* Protects video buffer lists */
struct list_head capture;
- struct vb2_buffer *active;
+ struct vb2_v4l2_buffer *active;
struct vb2_alloc_ctx *alloc_ctx;
struct sh_mobile_ceu_info *pdata;
@@ -152,9 +152,9 @@ struct sh_mobile_ceu_cam {
u32 code;
};
-static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
+static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf)
{
- return container_of(vb, struct sh_mobile_ceu_buffer, vb);
+ return container_of(vbuf, struct sh_mobile_ceu_buffer, vb);
}
static void ceu_write(struct sh_mobile_ceu_dev *priv,
@@ -210,11 +210,13 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
* for the current frame format if required
*/
static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *count, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
- struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
+ const struct v4l2_format *fmt = parg;
+ struct soc_camera_device *icd = container_of(vq,
+ struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -334,7 +336,8 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
bottom2 = CDBCR;
}
- phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);
+ phys_addr_top =
+ vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0);
switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
@@ -369,7 +372,8 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
{
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
/* Added list head initialization on alloc */
WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
@@ -379,17 +383,19 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
- struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct soc_camera_device *icd = container_of(vb->vb2_queue,
+ struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
unsigned long size;
size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
- vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+ vb->index, vb2_plane_size(vb, 0), size);
goto error;
}
@@ -416,7 +422,7 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
* we are not interested in the return value of
* sh_mobile_ceu_capture here.
*/
- pcdev->active = vb;
+ pcdev->active = vbuf;
sh_mobile_ceu_capture(pcdev);
}
spin_unlock_irq(&pcdev->lock);
@@ -429,14 +435,16 @@ error:
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
{
- struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct soc_camera_device *icd = container_of(vb->vb2_queue,
+ struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
spin_lock_irq(&pcdev->lock);
- if (pcdev->active == vb) {
+ if (pcdev->active == vbuf) {
/* disable capture (release DMA buffer), reset */
ceu_write(pcdev, CAPSR, 1 << 16);
pcdev->active = NULL;
@@ -458,7 +466,9 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
{
- struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct soc_camera_device *icd = container_of(vb->vb2_queue,
+ struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -467,7 +477,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
pcdev->buf_total);
/* This is for locking debugging only */
- INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);
+ INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue);
return 0;
}
@@ -504,17 +514,17 @@ static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
{
struct sh_mobile_ceu_dev *pcdev = data;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
int ret;
spin_lock(&pcdev->lock);
- vb = pcdev->active;
- if (!vb)
+ vbuf = pcdev->active;
+ if (!vbuf)
/* Stale interrupt from a released buffer */
goto out;
- list_del_init(&to_ceu_vb(vb)->queue);
+ list_del_init(&to_ceu_vb(vbuf)->queue);
if (!list_empty(&pcdev->capture))
pcdev->active = &list_entry(pcdev->capture.next,
@@ -523,12 +533,13 @@ static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
pcdev->active = NULL;
ret = sh_mobile_ceu_capture(pcdev);
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vbuf->timestamp);
if (!ret) {
- vb->v4l2_buf.field = pcdev->field;
- vb->v4l2_buf.sequence = pcdev->sequence++;
+ vbuf->field = pcdev->field;
+ vbuf->sequence = pcdev->sequence++;
}
- vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&vbuf->vb2_buf,
+ ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
out:
spin_unlock(&pcdev->lock);
@@ -633,7 +644,7 @@ static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
spin_lock_irq(&pcdev->lock);
if (pcdev->active) {
list_del_init(&to_ceu_vb(pcdev->active)->queue);
- vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
pcdev->active = NULL;
}
spin_unlock_irq(&pcdev->lock);
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 9087fed586fb..dc98122e78dc 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -38,7 +38,7 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-of.h>
#include <media/videobuf-core.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
/* Default to VGA resolution */
#define DEFAULT_WIDTH 640
@@ -1631,7 +1631,7 @@ static int soc_of_bind(struct soc_camera_host *ici,
struct soc_camera_async_client *sasc;
struct soc_of_info *info;
struct i2c_client *client;
- char clk_name[V4L2_SUBDEV_NAME_SIZE];
+ char clk_name[V4L2_SUBDEV_NAME_SIZE + 32];
int ret;
/* allocate a new subdev and add match info to it */
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index df61355b46f1..a0d267e017f6 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -180,7 +180,7 @@ static struct bdisp_frame *ctx_get_frame(struct bdisp_ctx *ctx,
static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
{
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
return;
@@ -191,10 +191,10 @@ static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (src_vb && dst_vb) {
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
- dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
- dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_vb->v4l2_buf.flags |= src_vb->v4l2_buf.flags &
+ dst_vb->timestamp = src_vb->timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |= src_vb->flags &
V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_vb, vb_state);
@@ -281,23 +281,23 @@ static int bdisp_get_addr(struct bdisp_ctx *ctx, struct vb2_buffer *vb,
static int bdisp_get_bufs(struct bdisp_ctx *ctx)
{
struct bdisp_frame *src, *dst;
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
int ret;
src = &ctx->src;
dst = &ctx->dst;
src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- ret = bdisp_get_addr(ctx, src_vb, src, src->paddr);
+ ret = bdisp_get_addr(ctx, &src_vb->vb2_buf, src, src->paddr);
if (ret)
return ret;
dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- ret = bdisp_get_addr(ctx, dst_vb, dst, dst->paddr);
+ ret = bdisp_get_addr(ctx, &dst_vb->vb2_buf, dst, dst->paddr);
if (ret)
return ret;
- dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+ dst_vb->timestamp = src_vb->timestamp;
return 0;
}
@@ -438,10 +438,11 @@ static void bdisp_ctrls_delete(struct bdisp_ctx *ctx)
}
static int bdisp_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nb_buf, unsigned int *nb_planes,
unsigned int sizes[], void *allocators[])
{
+ const struct v4l2_format *fmt = parg;
struct bdisp_ctx *ctx = vb2_get_drv_priv(vq);
struct bdisp_frame *frame = ctx_get_frame(ctx, vq->type);
@@ -483,6 +484,7 @@ static int bdisp_buf_prepare(struct vb2_buffer *vb)
static void bdisp_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
/* return to V4L2 any 0-size buffer so it can be dequeued by user */
@@ -493,13 +495,13 @@ static void bdisp_buf_queue(struct vb2_buffer *vb)
}
if (ctx->fh.m2m_ctx)
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct bdisp_ctx *ctx = q->drv_priv;
- struct vb2_buffer *buf;
+ struct vb2_v4l2_buffer *buf;
int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
if (ret < 0) {
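The mem2mem drivers in this series (bdisp above, ti-vpe and vim2m below) all propagate the timestamp, timecode and timestamp-source flag bits from the source to the destination buffer directly on the vb2_v4l2_buffer. A condensed sketch of that copy, assuming the timeval-based timestamp field used in these hunks (hypothetical helper, not part of the patch):

static void foo_copy_m2m_metadata(const struct vb2_v4l2_buffer *src,
				  struct vb2_v4l2_buffer *dst)
{
	dst->timestamp = src->timestamp;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	if (src->flags & V4L2_BUF_FLAG_TIMECODE)
		dst->timecode = src->timecode;
}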
diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig
index 641ad8f34956..7420a50572d3 100644
--- a/drivers/media/platform/sti/c8sectpfe/Kconfig
+++ b/drivers/media/platform/sti/c8sectpfe/Kconfig
@@ -3,7 +3,6 @@ config DVB_C8SECTPFE
depends on PINCTRL && DVB_CORE && I2C
depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
select FW_LOADER
- select FW_LOADER_USER_HELPER_FALLBACK
select DEBUG_FS
select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index f922f2e827bc..8490a65ae1c6 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -1084,10 +1084,10 @@ static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
seg_num, phdr->p_paddr, phdr->p_filesz,
dst, phdr->p_memsz);
- memcpy((void __iomem *)dst, (void *)fw->data + phdr->p_offset,
+ memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
phdr->p_filesz);
- memset((void __iomem *)dst + phdr->p_filesz, 0,
+ memset((void __force *)dst + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index c44760b705da..de24effd984f 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -40,7 +40,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "vpdma.h"
@@ -384,8 +384,8 @@ struct vpe_ctx {
unsigned int bufs_completed; /* bufs done in this batch */
struct vpe_q_data q_data[2]; /* src & dst queue data */
- struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
- struct vb2_buffer *dst_vb;
+ struct vb2_v4l2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
+ struct vb2_v4l2_buffer *dst_vb;
dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
void *mv_buf[2]; /* virtual addrs of motion vector bufs */
@@ -988,7 +988,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
const struct vpe_port_data *p_data = &port_data[port];
- struct vb2_buffer *vb = ctx->dst_vb;
+ struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
struct vpe_fmt *fmt = q_data->fmt;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = !ctx->src_mv_buf_selector;
@@ -1025,11 +1025,12 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
const struct vpe_port_data *p_data = &port_data[port];
- struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
+ struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_fmt *fmt = q_data->fmt;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = ctx->src_mv_buf_selector;
- int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
+ int field = vbuf->field == V4L2_FIELD_BOTTOM;
int frame_width, frame_height;
dma_addr_t dma_addr;
u32 flags = 0;
@@ -1222,8 +1223,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
struct vpe_dev *dev = (struct vpe_dev *)data;
struct vpe_ctx *ctx;
struct vpe_q_data *d_q_data;
- struct vb2_buffer *s_vb, *d_vb;
- struct v4l2_buffer *s_buf, *d_buf;
+ struct vb2_v4l2_buffer *s_vb, *d_vb;
unsigned long flags;
u32 irqst0, irqst1;
@@ -1286,20 +1286,18 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;
- s_buf = &s_vb->v4l2_buf;
- d_buf = &d_vb->v4l2_buf;
- d_buf->flags = s_buf->flags;
+ d_vb->flags = s_vb->flags;
+ d_vb->timestamp = s_vb->timestamp;
- d_buf->timestamp = s_buf->timestamp;
- if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
- d_buf->timecode = s_buf->timecode;
+ if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ d_vb->timecode = s_vb->timecode;
- d_buf->sequence = ctx->sequence;
+ d_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_DATA_INTERLACED) {
- d_buf->field = ctx->field;
+ d_vb->field = ctx->field;
if (ctx->field == V4L2_FIELD_BOTTOM) {
ctx->sequence++;
ctx->field = V4L2_FIELD_TOP;
@@ -1308,7 +1306,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->field = V4L2_FIELD_BOTTOM;
}
} else {
- d_buf->field = V4L2_FIELD_NONE;
+ d_vb->field = V4L2_FIELD_NONE;
ctx->sequence++;
}
@@ -1798,7 +1796,7 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
* Queue operations
*/
static int vpe_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -1825,6 +1823,7 @@ static int vpe_queue_setup(struct vb2_queue *vq,
static int vpe_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vpe_q_data *q_data;
int i, num_planes;
@@ -1836,10 +1835,10 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (!(q_data->flags & Q_DATA_INTERLACED)) {
- vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ vbuf->field = V4L2_FIELD_NONE;
} else {
- if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
- vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+ if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
return -EINVAL;
}
}
@@ -1862,9 +1861,10 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
static void vpe_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 295fde5fdb75..e18fb9f9ed2f 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -197,8 +197,8 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
static int device_process(struct vim2m_ctx *ctx,
- struct vb2_buffer *in_vb,
- struct vb2_buffer *out_vb)
+ struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
{
struct vim2m_dev *dev = ctx->dev;
struct vim2m_q_data *q_data;
@@ -213,15 +213,16 @@ static int device_process(struct vim2m_ctx *ctx,
height = q_data->height;
bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
- p_in = vb2_plane_vaddr(in_vb, 0);
- p_out = vb2_plane_vaddr(out_vb, 0);
+ p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
if (!p_in || !p_out) {
v4l2_err(&dev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
return -EFAULT;
}
- if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) {
+ if (vb2_plane_size(&in_vb->vb2_buf, 0) >
+ vb2_plane_size(&out_vb->vb2_buf, 0)) {
v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
return -EINVAL;
}
@@ -231,16 +232,15 @@ static int device_process(struct vim2m_ctx *ctx,
bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
w = 0;
- out_vb->v4l2_buf.sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
- in_vb->v4l2_buf.sequence = q_data->sequence++;
- memcpy(&out_vb->v4l2_buf.timestamp,
- &in_vb->v4l2_buf.timestamp,
- sizeof(struct timeval));
- if (in_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
- memcpy(&out_vb->v4l2_buf.timecode, &in_vb->v4l2_buf.timecode,
- sizeof(struct v4l2_timecode));
- out_vb->v4l2_buf.field = in_vb->v4l2_buf.field;
- out_vb->v4l2_buf.flags = in_vb->v4l2_buf.flags &
+ out_vb->sequence =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
+ in_vb->sequence = q_data->sequence++;
+ out_vb->timestamp = in_vb->timestamp;
+
+ if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ out_vb->timecode = in_vb->timecode;
+ out_vb->field = in_vb->field;
+ out_vb->flags = in_vb->flags &
(V4L2_BUF_FLAG_TIMECODE |
V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
@@ -374,7 +374,7 @@ static void device_run(void *priv)
{
struct vim2m_ctx *ctx = priv;
struct vim2m_dev *dev = ctx->dev;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -389,7 +389,7 @@ static void device_isr(unsigned long priv)
{
struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
struct vim2m_ctx *curr_ctx;
- struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
@@ -710,10 +710,11 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
*/
static int vim2m_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
struct vim2m_q_data *q_data;
unsigned int size, count = *nbuffers;
@@ -747,6 +748,7 @@ static int vim2m_queue_setup(struct vb2_queue *vq,
static int vim2m_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vim2m_q_data *q_data;
@@ -754,9 +756,9 @@ static int vim2m_buf_prepare(struct vb2_buffer *vb)
q_data = get_q_data(ctx, vb->vb2_queue->type);
if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
- if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
- vb->v4l2_buf.field = V4L2_FIELD_NONE;
- if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
dprintk(ctx->dev, "%s field isn't supported\n",
__func__);
return -EINVAL;
@@ -776,9 +778,10 @@ static int vim2m_buf_prepare(struct vb2_buffer *vb)
static void vim2m_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
@@ -793,18 +796,18 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
static void vim2m_stop_streaming(struct vb2_queue *q)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
- vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
else
- vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (vb == NULL)
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
return;
spin_lock_irqsave(&ctx->dev->irqlock, flags);
- v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
}
}
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index c3090932f06d..0885e93ad436 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -20,3 +20,11 @@ config VIDEO_VIVID
Say Y here if you want to test video apps or debug V4L devices.
When in doubt, say N.
+
+config VIDEO_VIVID_MAX_DEVS
+ int "Maximum number of devices"
+ depends on VIDEO_VIVID
+ default "64"
+ ---help---
+ This allows you to specify the maximum number of devices supported
+ by the vivid driver.
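With this Kconfig option in place, the compile-time device limit is no longer a hard-coded 64 and can be sized from a kernel configuration fragment, for example (hypothetical fragment; 16 is an arbitrary value, the default remains 64):

# .config fragment
CONFIG_VIDEO_VIVID=m
CONFIG_VIDEO_VIVID_MAX_DEVS=16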
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index a047b4716741..ec125becb7af 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -51,7 +51,7 @@
#define VIVID_MODULE_NAME "vivid"
/* The maximum number of vivid devices */
-#define VIVID_MAX_DEVS 64
+#define VIVID_MAX_DEVS CONFIG_VIDEO_VIVID_MAX_DEVS
MODULE_DESCRIPTION("Virtual Video Test Driver");
MODULE_AUTHOR("Hans Verkuil");
@@ -1341,8 +1341,11 @@ static int vivid_remove(struct platform_device *pdev)
struct vivid_dev *dev;
unsigned i;
- for (i = 0; vivid_devs[i]; i++) {
+
+ for (i = 0; i < n_devs; i++) {
dev = vivid_devs[i];
+ if (!dev)
+ continue;
if (dev->has_vid_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index c72349c83fab..55b304a705d5 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -21,7 +21,7 @@
#define _VIVID_CORE_H_
#include <linux/fb.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ctrls.h>
@@ -93,7 +93,7 @@ extern struct vivid_fmt vivid_formats[];
/* buffer for one video frame */
struct vivid_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -123,6 +123,7 @@ enum vivid_colorspace {
VIVID_CS_SRGB,
VIVID_CS_ADOBERGB,
VIVID_CS_2020,
+ VIVID_CS_DCI_P3,
VIVID_CS_240M,
VIVID_CS_SYS_M,
VIVID_CS_SYS_BG,
@@ -451,6 +452,7 @@ struct vivid_dev {
unsigned sdr_buffersize;
unsigned sdr_adc_freq;
unsigned sdr_fm_freq;
+ unsigned sdr_fm_deviation;
int sdr_fixp_src_phase;
int sdr_fixp_mod_phase;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 339c8b7e53c8..f41ac0b01fec 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -99,6 +99,7 @@
#define VIVID_CID_RADIO_TX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 94)
+#define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110)
/* General User Controls */
@@ -342,6 +343,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
V4L2_COLORSPACE_SRGB,
V4L2_COLORSPACE_ADOBERGB,
V4L2_COLORSPACE_BT2020,
+ V4L2_COLORSPACE_DCI_P3,
V4L2_COLORSPACE_SMPTE240M,
V4L2_COLORSPACE_470_SYSTEM_M,
V4L2_COLORSPACE_470_SYSTEM_BG,
@@ -548,7 +550,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_osd_mode = {
.id = VIVID_CID_OSD_TEXT_MODE,
.name = "OSD Text Mode",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 2,
+ .max = ARRAY_SIZE(vivid_ctrl_osd_mode_strings) - 2,
.qmenu = vivid_ctrl_osd_mode_strings,
};
@@ -640,7 +642,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_tstamp_src = {
.id = VIVID_CID_TSTAMP_SRC,
.name = "Timestamp Source",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 1,
+ .max = ARRAY_SIZE(vivid_ctrl_tstamp_src_strings) - 2,
.qmenu = vivid_ctrl_tstamp_src_strings,
};
@@ -701,6 +703,7 @@ static const char * const vivid_ctrl_colorspace_strings[] = {
"sRGB",
"AdobeRGB",
"BT.2020",
+ "DCI-P3",
"SMPTE 240M",
"470 System M",
"470 System BG",
@@ -712,7 +715,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_colorspace = {
.id = VIVID_CID_COLORSPACE,
.name = "Colorspace",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 7,
+ .max = ARRAY_SIZE(vivid_ctrl_colorspace_strings) - 2,
.def = 2,
.qmenu = vivid_ctrl_colorspace_strings,
};
@@ -724,6 +727,8 @@ static const char * const vivid_ctrl_xfer_func_strings[] = {
"AdobeRGB",
"SMPTE 240M",
"None",
+ "DCI-P3",
+ "SMPTE 2084",
NULL,
};
@@ -732,7 +737,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_xfer_func = {
.id = VIVID_CID_XFER_FUNC,
.name = "Transfer Function",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 5,
+ .max = ARRAY_SIZE(vivid_ctrl_xfer_func_strings) - 2,
.qmenu = vivid_ctrl_xfer_func_strings,
};
@@ -754,7 +759,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
.id = VIVID_CID_YCBCR_ENC,
.name = "Y'CbCr Encoding",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 8,
+ .max = ARRAY_SIZE(vivid_ctrl_ycbcr_enc_strings) - 2,
.qmenu = vivid_ctrl_ycbcr_enc_strings,
};
@@ -770,7 +775,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_quantization = {
.id = VIVID_CID_QUANTIZATION,
.name = "Quantization",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 2,
+ .max = ARRAY_SIZE(vivid_ctrl_quantization_strings) - 2,
.qmenu = vivid_ctrl_quantization_strings,
};
@@ -1088,7 +1093,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_std_signal_mode = {
.id = VIVID_CID_STD_SIGNAL_MODE,
.name = "Standard Signal Mode",
.type = V4L2_CTRL_TYPE_MENU,
- .max = 5,
+ .max = ARRAY_SIZE(vivid_ctrl_std_signal_mode_strings) - 2,
.menu_skip_mask = 1 << 3,
.qmenu = vivid_ctrl_std_signal_mode_strings,
};
@@ -1257,6 +1262,36 @@ static const struct v4l2_ctrl_config vivid_ctrl_radio_tx_rds_blockio = {
};
+/* SDR Capture Controls */
+
+static int vivid_sdr_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_sdr_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_SDR_CAP_FM_DEVIATION:
+ dev->sdr_fm_deviation = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_sdr_cap_ctrl_ops = {
+ .s_ctrl = vivid_sdr_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = {
+ .ops = &vivid_sdr_cap_ctrl_ops,
+ .id = VIVID_CID_SDR_CAP_FM_DEVIATION,
+ .name = "FM Deviation",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 100,
+ .max = 200000,
+ .def = 75000,
+ .step = 1,
+};
+
+
static const struct v4l2_ctrl_config vivid_ctrl_class = {
.ops = &vivid_user_gen_ctrl_ops,
.flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
@@ -1314,7 +1349,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_radio_tx, 17);
v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
- v4l2_ctrl_handler_init(hdl_sdr_cap, 18);
+ v4l2_ctrl_handler_init(hdl_sdr_cap, 19);
v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
/* User Controls */
@@ -1545,6 +1580,10 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
&vivid_radio_tx_ctrl_ops,
V4L2_CID_RDS_TX_MUSIC_SPEECH, 0, 1, 1, 1);
}
+ if (dev->has_sdr_cap) {
+ v4l2_ctrl_new_custom(hdl_sdr_cap,
+ &vivid_ctrl_sdr_cap_fm_deviation, NULL);
+ }
if (hdl_user_gen->error)
return hdl_user_gen->error;
if (hdl_user_vid->error)
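The repeated ".max = ARRAY_SIZE(...) - 2" changes above follow from how menu controls are declared: the qmenu string arrays are NULL-terminated, so a table of N entries plus the terminator has ARRAY_SIZE N + 1, and the highest valid menu index is ARRAY_SIZE - 2. A sketch of the idiom with a hypothetical menu control (foo_* names and the control id are placeholders):

static const char * const foo_mode_strings[] = {
	"Off",
	"On",
	"Auto",
	NULL,			/* terminator, not a menu entry */
};

static const struct v4l2_ctrl_config foo_ctrl_mode = {
	.ops = &foo_ctrl_ops,	/* hypothetical ops */
	.id = FOO_CID_MODE,	/* hypothetical control id */
	.name = "Mode",
	.type = V4L2_CTRL_TYPE_MENU,
	.max = ARRAY_SIZE(foo_mode_strings) - 2,	/* == 2, the index of "Auto" */
	.qmenu = foo_mode_strings,
};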
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 1727f5453f0b..83cc6d3b4784 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -236,8 +236,8 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
void *vbuf;
if (p == 0 || tpg_g_buffers(tpg) > 1)
- return vb2_plane_vaddr(&buf->vb, p);
- vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
for (i = 0; i < p; i++)
vbuf += bpl[i] * h / tpg->vdownsampling[i];
return vbuf;
@@ -246,7 +246,7 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
struct vivid_buffer *vid_cap_buf)
{
- bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index];
+ bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
struct tpg_data *tpg = &dev->tpg;
struct vivid_buffer *vid_out_buf = NULL;
unsigned vdiv = dev->fmt_out->vdownsampling[p];
@@ -283,12 +283,12 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
if (vid_out_buf == NULL)
return -ENODATA;
- vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field;
+ vid_cap_buf->vb.field = vid_out_buf->vb.field;
voutbuf = plane_vaddr(tpg, vid_out_buf, p,
dev->bytesperline_out, dev->fmt_out_rect.height);
if (p < dev->fmt_out->buffers)
- voutbuf += vid_out_buf->vb.v4l2_planes[p].data_offset;
+ voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
(dev->loop_vid_out.top / vdiv) * stride_out;
vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
@@ -429,17 +429,19 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
bool is_loop = false;
if (dev->loop_video && dev->can_loop_video &&
- ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
- (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
+ ((vivid_is_svid_cap(dev) &&
+ !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
+ (vivid_is_hdmi_cap(dev) &&
+ !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
is_loop = true;
- buf->vb.v4l2_buf.sequence = dev->vid_cap_seq_count;
+ buf->vb.sequence = dev->vid_cap_seq_count;
/*
* Take the timestamp now if the timestamp source is set to
* "Start of Exposure".
*/
if (dev->tstamp_src_is_soe)
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&buf->vb.timestamp);
if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
/*
* 60 Hz standards start with the bottom field, 50 Hz standards
@@ -447,19 +449,19 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
* then the field is TOP for 50 Hz and BOTTOM for 60 Hz
* standards.
*/
- buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
+ buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
/*
* The sequence counter counts frames, not fields. So divide
* by two.
*/
- buf->vb.v4l2_buf.sequence /= 2;
+ buf->vb.sequence /= 2;
} else {
- buf->vb.v4l2_buf.field = dev->field_cap;
+ buf->vb.field = dev->field_cap;
}
- tpg_s_field(tpg, buf->vb.v4l2_buf.field,
+ tpg_s_field(tpg, buf->vb.field,
dev->field_cap == V4L2_FIELD_ALTERNATE);
- tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.v4l2_buf.index]);
+ tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);
vivid_precalc_copy_rects(dev);
@@ -479,13 +481,16 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
}
tpg_calc_text_basep(tpg, basep, p, vbuf);
if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
- tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev), p, vbuf);
+ tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
+ p, vbuf);
}
- dev->must_blank[buf->vb.v4l2_buf.index] = false;
+ dev->must_blank[buf->vb.vb2_buf.index] = false;
/* Updates stream time, only update at the start of a new frame. */
- if (dev->field_cap != V4L2_FIELD_ALTERNATE || (buf->vb.v4l2_buf.sequence & 1) == 0)
- dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
+ if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
+ (buf->vb.sequence & 1) == 0)
+ dev->ms_vid_cap =
+ jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
ms = dev->ms_vid_cap;
if (dev->osd_mode <= 1) {
@@ -494,9 +499,9 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
(ms / (60 * 1000)) % 60,
(ms / 1000) % 60,
ms % 1000,
- buf->vb.v4l2_buf.sequence,
+ buf->vb.sequence,
(dev->field_cap == V4L2_FIELD_ALTERNATE) ?
- (buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ?
+ (buf->vb.field == V4L2_FIELD_TOP ?
" top" : " bottom") : "");
tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
}
@@ -553,8 +558,8 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
* the timestamp now.
*/
if (!dev->tstamp_src_is_soe)
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
}
/*
@@ -600,7 +605,7 @@ static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
struct tpg_data *tpg = &dev->tpg;
unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
void *vbase = dev->fb_vbase_cap;
- void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ void *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
unsigned img_width = dev->compose_cap.width;
unsigned img_height = dev->compose_cap.height;
unsigned stride = tpg->bytesperline[0];
@@ -616,7 +621,7 @@ static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
return;
if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
- dev->overlay_cap_field != buf->vb.v4l2_buf.field)
+ dev->overlay_cap_field != buf->vb.field)
return;
vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
@@ -699,17 +704,17 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
/* Fill buffer */
vivid_fillbuff(dev, vid_cap_buf);
dprintk(dev, 1, "filled buffer %d\n",
- vid_cap_buf->vb.v4l2_buf.index);
+ vid_cap_buf->vb.vb2_buf.index);
/* Handle overlay */
if (dev->overlay_cap_owner && dev->fb_cap.base &&
- dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
+ dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
vivid_overlay(dev, vid_cap_buf);
- vb2_buffer_done(&vid_cap_buf->vb, dev->dqbuf_error ?
+ vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vid_cap buffer %d done\n",
- vid_cap_buf->vb.v4l2_buf.index);
+ vid_cap_buf->vb.vb2_buf.index);
}
if (vbi_cap_buf) {
@@ -717,10 +722,10 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
else
vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
- vb2_buffer_done(&vbi_cap_buf->vb, dev->dqbuf_error ?
+ vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vbi_cap %d done\n",
- vbi_cap_buf->vb.v4l2_buf.index);
+ vbi_cap_buf->vb.vb2_buf.index);
}
dev->dqbuf_error = false;
@@ -884,9 +889,9 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vid_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vid_cap buffer %d done\n",
- buf->vb.v4l2_buf.index);
+ buf->vb.vb2_buf.index);
}
}
@@ -897,9 +902,9 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vbi_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vbi_cap buffer %d done\n",
- buf->vb.v4l2_buf.index);
+ buf->vb.vb2_buf.index);
}
}
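
The buf->vb.v4l2_buf.* accesses above become buf->vb.* or buf->vb.vb2_buf.* because struct vivid_buffer now embeds a vb2_v4l2_buffer, which in turn embeds the plain vb2_buffer: sequence, field and timestamp sit directly in vb, while index and the plane array are reached through vb.vb2_buf. A standalone sketch with mocked types (an assumed layout, not the real kernel structs) showing why the two container_of() steps used in the buf_queue callbacks recover the driver buffer:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mock_vb2_buffer {		/* stands in for struct vb2_buffer */
	unsigned int index;
};

struct mock_vb2_v4l2_buffer {		/* stands in for struct vb2_v4l2_buffer */
	struct mock_vb2_buffer vb2_buf;
	unsigned int sequence;
	unsigned int field;
};

struct mock_vivid_buffer {		/* stands in for struct vivid_buffer */
	struct mock_vb2_v4l2_buffer vb;
	int on_list;
};

int main(void)
{
	struct mock_vivid_buffer buf = { .vb = { .vb2_buf = { .index = 3 },
						 .sequence = 42 } };
	/* The vb2 core hands drivers a vb2_buffer; buf_queue first upcasts it. */
	struct mock_vb2_buffer *vb = &buf.vb.vb2_buf;
	struct mock_vb2_v4l2_buffer *vbuf =
		container_of(vb, struct mock_vb2_v4l2_buffer, vb2_buf);
	struct mock_vivid_buffer *drv_buf =
		container_of(vbuf, struct mock_vivid_buffer, vb);

	printf("index=%u sequence=%u same=%d\n",
	       drv_buf->vb.vb2_buf.index, drv_buf->vb.sequence, drv_buf == &buf);
	return 0;
}
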
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index d9f36ccd7efb..c2c46dcdbe95 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -87,33 +87,33 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
return;
if (vid_out_buf) {
- vid_out_buf->vb.v4l2_buf.sequence = dev->vid_out_seq_count;
+ vid_out_buf->vb.sequence = dev->vid_out_seq_count;
if (dev->field_out == V4L2_FIELD_ALTERNATE) {
/*
- * The sequence counter counts frames, not fields. So divide
- * by two.
+ * The sequence counter counts frames, not fields.
+ * So divide by two.
*/
- vid_out_buf->vb.v4l2_buf.sequence /= 2;
+ vid_out_buf->vb.sequence /= 2;
}
- v4l2_get_timestamp(&vid_out_buf->vb.v4l2_buf.timestamp);
- vid_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
- vb2_buffer_done(&vid_out_buf->vb, dev->dqbuf_error ?
+ v4l2_get_timestamp(&vid_out_buf->vb.timestamp);
+ vid_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
+ vb2_buffer_done(&vid_out_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vid_out buffer %d done\n",
- vid_out_buf->vb.v4l2_buf.index);
+ vid_out_buf->vb.vb2_buf.index);
}
if (vbi_out_buf) {
if (dev->stream_sliced_vbi_out)
vivid_sliced_vbi_out_process(dev, vbi_out_buf);
- vbi_out_buf->vb.v4l2_buf.sequence = dev->vbi_out_seq_count;
- v4l2_get_timestamp(&vbi_out_buf->vb.v4l2_buf.timestamp);
- vbi_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
- vb2_buffer_done(&vbi_out_buf->vb, dev->dqbuf_error ?
+ vbi_out_buf->vb.sequence = dev->vbi_out_seq_count;
+ v4l2_get_timestamp(&vbi_out_buf->vb.timestamp);
+ vbi_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
+ vb2_buffer_done(&vbi_out_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vbi_out buffer %d done\n",
- vbi_out_buf->vb.v4l2_buf.index);
+ vbi_out_buf->vb.vb2_buf.index);
}
dev->dqbuf_error = false;
}
@@ -274,9 +274,9 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vid_out_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vid_out buffer %d done\n",
- buf->vb.v4l2_buf.index);
+ buf->vb.vb2_buf.index);
}
}
@@ -287,9 +287,9 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vbi_out_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vbi_out buffer %d done\n",
- buf->vb.v4l2_buf.index);
+ buf->vb.vb2_buf.index);
}
}
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index 084d346fb4c4..e15eef6a94e5 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
case FBIOGET_VBLANK: {
struct fb_vblank vblank;
+ memset(&vblank, 0, sizeof(vblank));
vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
vblank.count = 0;
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index d2f2188a0efe..082c401764ce 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/math64.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
@@ -40,7 +41,7 @@ struct vivid_format {
};
/* format descriptions for capture and preview */
-static struct vivid_format formats[] = {
+static const struct vivid_format formats[] = {
{
.pixelformat = V4L2_SDR_FMT_CU8,
.buffersize = SDR_CAP_SAMPLES_PER_BUF * 2,
@@ -114,11 +115,11 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
spin_unlock(&dev->slock);
if (sdr_cap_buf) {
- sdr_cap_buf->vb.v4l2_buf.sequence = dev->sdr_cap_seq_count;
+ sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
vivid_sdr_cap_process(dev, sdr_cap_buf);
- v4l2_get_timestamp(&sdr_cap_buf->vb.v4l2_buf.timestamp);
- sdr_cap_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
- vb2_buffer_done(&sdr_cap_buf->vb, dev->dqbuf_error ?
+ v4l2_get_timestamp(&sdr_cap_buf->vb.timestamp);
+ sdr_cap_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
+ vb2_buffer_done(&sdr_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dev->dqbuf_error = false;
}
@@ -161,7 +162,8 @@ static int vivid_thread_sdr_cap(void *data)
/* Calculate the number of jiffies since we started streaming */
jiffies_since_start = cur_jiffies - dev->jiffies_sdr_cap;
/* Get the number of buffers streamed since the start */
- buffers_since_start = (u64)jiffies_since_start * dev->sdr_adc_freq +
+ buffers_since_start =
+ (u64)jiffies_since_start * dev->sdr_adc_freq +
(HZ * SDR_CAP_SAMPLES_PER_BUF) / 2;
do_div(buffers_since_start, HZ * SDR_CAP_SAMPLES_PER_BUF);
@@ -176,7 +178,8 @@ static int vivid_thread_sdr_cap(void *data)
dev->sdr_cap_seq_offset = buffers_since_start;
buffers_since_start = 0;
}
- dev->sdr_cap_seq_count = buffers_since_start + dev->sdr_cap_seq_offset;
+ dev->sdr_cap_seq_count =
+ buffers_since_start + dev->sdr_cap_seq_offset;
vivid_thread_sdr_cap_tick(dev);
mutex_unlock(&dev->mutex);
@@ -210,7 +213,7 @@ static int vivid_thread_sdr_cap(void *data)
return 0;
}
-static int sdr_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int sdr_cap_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned *nbuffers, unsigned *nplanes,
unsigned sizes[], void *alloc_ctxs[])
{
@@ -247,8 +250,9 @@ static int sdr_cap_buf_prepare(struct vb2_buffer *vb)
static void sdr_cap_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -282,7 +286,8 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
}
return err;
@@ -299,9 +304,10 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
while (!list_empty(&dev->sdr_cap_active)) {
struct vivid_buffer *buf;
- buf = list_entry(dev->sdr_cap_active.next, struct vivid_buffer, list);
+ buf = list_entry(dev->sdr_cap_active.next,
+ struct vivid_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
/* shutdown control thread */
@@ -321,7 +327,8 @@ const struct vb2_ops vivid_sdr_cap_qops = {
.wait_finish = vb2_ops_wait_finish,
};
-int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
+ struct v4l2_frequency_band *band)
{
switch (band->tuner) {
case 0:
@@ -339,7 +346,8 @@ int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency
}
}
-int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+int vivid_sdr_g_frequency(struct file *file, void *fh,
+ struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -357,7 +365,8 @@ int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf
}
}
-int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int vivid_sdr_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
unsigned freq = vf->frequency;
@@ -403,14 +412,16 @@ int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
case 0:
strlcpy(vt->name, "ADC", sizeof(vt->name));
vt->type = V4L2_TUNER_ADC;
- vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->capability =
+ V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
vt->rangelow = bands_adc[0].rangelow;
vt->rangehigh = bands_adc[2].rangehigh;
return 0;
case 1:
strlcpy(vt->name, "RF", sizeof(vt->name));
vt->type = V4L2_TUNER_RF;
- vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->capability =
+ V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
vt->rangelow = bands_fm[0].rangelow;
vt->rangehigh = bands_fm[0].rangehigh;
return 0;
@@ -488,47 +499,42 @@ int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
#define FIXP_N (15)
#define FIXP_FRAC (1 << FIXP_N)
#define FIXP_2PI ((int)(2 * 3.141592653589 * FIXP_FRAC))
+#define M_100000PI (3.14159 * 100000)
void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
{
- u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
unsigned long i;
- unsigned long plane_size = vb2_plane_size(&buf->vb, 0);
+ unsigned long plane_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ s64 s64tmp;
s32 src_phase_step;
s32 mod_phase_step;
s32 fixp_i;
s32 fixp_q;
- /*
- * TODO: Generated beep tone goes very crackly when sample rate is
- * increased to ~1Msps or more. That is because of huge rounding error
- * of phase angle caused by used cosine implementation.
- */
-
/* calculate phase step */
#define BEEP_FREQ 1000 /* 1kHz beep */
src_phase_step = DIV_ROUND_CLOSEST(FIXP_2PI * BEEP_FREQ,
- dev->sdr_adc_freq);
+ dev->sdr_adc_freq);
for (i = 0; i < plane_size; i += 2) {
mod_phase_step = fixp_cos32_rad(dev->sdr_fixp_src_phase,
FIXP_2PI) >> (31 - FIXP_N);
dev->sdr_fixp_src_phase += src_phase_step;
- dev->sdr_fixp_mod_phase += mod_phase_step / 4;
+ s64tmp = (s64) mod_phase_step * dev->sdr_fm_deviation;
+ dev->sdr_fixp_mod_phase += div_s64(s64tmp, M_100000PI);
/*
- * Transfer phases to [0 / 2xPI] in order to avoid variable
+ * Transfer phase angle to [0, 2xPI] in order to avoid variable
* overflow and make it suitable for cosine implementation
* used, which does not support negative angles.
*/
- while (dev->sdr_fixp_mod_phase < FIXP_2PI)
- dev->sdr_fixp_mod_phase += FIXP_2PI;
- while (dev->sdr_fixp_mod_phase > FIXP_2PI)
- dev->sdr_fixp_mod_phase -= FIXP_2PI;
+ dev->sdr_fixp_src_phase %= FIXP_2PI;
+ dev->sdr_fixp_mod_phase %= FIXP_2PI;
- while (dev->sdr_fixp_src_phase > FIXP_2PI)
- dev->sdr_fixp_src_phase -= FIXP_2PI;
+ if (dev->sdr_fixp_mod_phase < 0)
+ dev->sdr_fixp_mod_phase += FIXP_2PI;
fixp_i = fixp_cos32_rad(dev->sdr_fixp_mod_phase, FIXP_2PI);
fixp_q = fixp_sin32_rad(dev->sdr_fixp_mod_phase, FIXP_2PI);
@@ -540,7 +546,7 @@ void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
switch (dev->sdr_pixelformat) {
case V4L2_SDR_FMT_CU8:
- /* convert 'fixp float' to u8 */
+ /* convert 'fixp float' to u8 [0, +255] */
/* u8 = X * 127.5 + 127.5; X is float [-1.0, +1.0] */
fixp_i = fixp_i * 1275 + FIXP_FRAC * 1275;
fixp_q = fixp_q * 1275 + FIXP_FRAC * 1275;
@@ -548,9 +554,10 @@ void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
*vbuf++ = DIV_ROUND_CLOSEST(fixp_q, FIXP_FRAC * 10);
break;
case V4L2_SDR_FMT_CS8:
- /* convert 'fixp float' to s8 */
- fixp_i = fixp_i * 1275;
- fixp_q = fixp_q * 1275;
+ /* convert 'fixp float' to s8 [-128, +127] */
+ /* s8 = X * 127.5 - 0.5; X is float [-1.0, +1.0] */
+ fixp_i = fixp_i * 1275 - FIXP_FRAC * 5;
+ fixp_q = fixp_q * 1275 - FIXP_FRAC * 5;
*vbuf++ = DIV_ROUND_CLOSEST(fixp_i, FIXP_FRAC * 10);
*vbuf++ = DIV_ROUND_CLOSEST(fixp_q, FIXP_FRAC * 10);
break;
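
The new control feeds the modulator through s64tmp = mod_phase_step * sdr_fm_deviation, divided by 100000*pi, so the per-sample phase increment scales linearly with the requested deviation. A user-space sketch of that arithmetic, with floating-point cos() standing in for the kernel's fixp_cos32_rad() and a 300 kHz sample rate chosen purely as an example:

#include <stdio.h>
#include <math.h>

#define FIXP_N		15
#define FIXP_FRAC	(1 << FIXP_N)
#define FIXP_2PI	((int)(2 * 3.141592653589 * FIXP_FRAC))
#define M_100000PI	(3.14159 * 100000)

int main(void)
{
	const unsigned adc_freq = 300000;	/* example sample rate in Hz */
	const unsigned beep_freq = 1000;	/* 1 kHz audio tone */
	const int fm_deviation = 75000;		/* Hz, the control's default */
	/* +adc_freq/2 mimics DIV_ROUND_CLOSEST() in the driver */
	int src_phase_step = (FIXP_2PI * beep_freq + adc_freq / 2) / adc_freq;
	int src_phase = 0, mod_phase = 0;

	for (int i = 0; i < 4; i++) {
		/* cos() scaled to Q15, like fixp_cos32_rad() >> (31 - FIXP_N) */
		int mod_phase_step =
			(int)(cos((double)src_phase / FIXP_FRAC) * FIXP_FRAC);

		src_phase += src_phase_step;
		mod_phase += (int)((long long)mod_phase_step * fm_deviation /
				   M_100000PI);
		src_phase %= FIXP_2PI;
		mod_phase %= FIXP_2PI;
		if (mod_phase < 0)
			mod_phase += FIXP_2PI;
		/* I/Q of the FM carrier for this sample, in [-1, 1] */
		printf("I=%.3f Q=%.3f\n",
		       cos((double)mod_phase / FIXP_FRAC),
		       sin((double)mod_phase / FIXP_FRAC));
	}
	return 0;
}
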
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.c b/drivers/media/platform/vivid/vivid-tpg-colors.c
index 8f231a6e68c9..2299f0ce47c8 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.c
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.c
@@ -598,7 +598,7 @@ const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {
};
/* Generated table */
-const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_NONE + 1][TPG_COLOR_CSC_BLACK + 1] = {
+const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
@@ -639,6 +639,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 },
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 },
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
@@ -679,6 +695,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
@@ -719,46 +751,78 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2892, 2988, 2807 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2846, 3070, 843 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1656, 2962, 2783 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][3] = { 1572, 3045, 763 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][4] = { 2476, 229, 2742 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][5] = { 2420, 672, 614 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][6] = { 725, 63, 2718 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][7] = { 534, 561, 509 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][0] = { 3013, 3099, 2935 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][1] = { 2970, 3174, 1091 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][2] = { 1871, 3076, 2913 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][3] = { 1791, 3152, 1013 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][4] = { 2632, 468, 2876 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2581, 924, 866 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 976, 180, 2854 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 786, 813, 762 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 2990, 3077, 2912 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2947, 3153, 1119 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1859, 3053, 2889 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1782, 3130, 1047 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2608, 556, 2852 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2557, 964, 912 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1013, 309, 2830 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 839, 864, 817 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2879, 2975, 2793 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2832, 3059, 806 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1629, 2949, 2768 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][3] = { 1543, 3033, 725 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][4] = { 2457, 203, 2727 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][5] = { 2401, 633, 574 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][6] = { 687, 56, 2702 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][7] = { 493, 521, 469 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][0] = { 2060, 2194, 1943 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][1] = { 1995, 2314, 237 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][2] = { 725, 2157, 1911 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][3] = { 660, 2278, 205 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][4] = { 1525, 50, 1857 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][5] = { 1461, 171, 151 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][6] = { 190, 14, 1825 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][7] = { 126, 134, 118 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2892, 3034, 910 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1715, 2916, 2914 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][3] = { 1631, 3012, 828 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][4] = { 2497, 119, 2867 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][5] = { 2440, 649, 657 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][6] = { 740, 0, 2841 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3055, 3056 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][1] = { 3013, 3142, 1157 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][2] = { 1926, 3034, 3032 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][3] = { 1847, 3121, 1076 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][4] = { 2651, 304, 2990 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2989, 3120, 1180 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1913, 3011, 3009 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1836, 3099, 1105 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2627, 413, 2966 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2576, 943, 951 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1026, 0, 2942 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][3] = { 1603, 2999, 791 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][4] = { 2479, 106, 2853 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][5] = { 2422, 610, 618 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][6] = { 702, 0, 2827 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][1] = { 2059, 2262, 266 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][2] = { 771, 2092, 2089 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][3] = { 705, 2229, 231 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][4] = { 1550, 26, 2024 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][5] = { 1484, 163, 165 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][6] = { 196, 0, 1988 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][1] = { 3136, 3251, 1429 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][2] = { 2150, 3156, 3154 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][3] = { 2077, 3233, 1352 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][4] = { 2812, 589, 3116 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][5] = { 2765, 1182, 1190 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][6] = { 1270, 0, 3094 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3784, 3825, 2879 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 3351, 3791, 3790 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 3311, 3819, 2815 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3659, 1900, 3777 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3640, 2662, 2669 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2743, 0, 3769 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 464 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][2] = { 786, 2939, 2939 },
@@ -799,6 +863,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][5] = { 2041, 130, 130 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2149 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1003 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][2] = { 1313, 3175, 3175 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][3] = { 1313, 3175, 1003 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][4] = { 3126, 1084, 3188 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][5] = { 3126, 1084, 1084 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3188 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2476 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 2782, 3798, 3798 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 2782, 3798, 2476 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 3780, 2563, 3803 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 3780, 2563, 2563 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3803 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
@@ -839,6 +919,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 },
@@ -879,6 +975,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2778 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 3306, 3798, 3798 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 3306, 3798, 2778 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3661, 2563, 3781 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3661, 2563, 2563 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3781 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 },
@@ -919,6 +1031,78 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][5] = { 1382, 268, 162 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][6] = { 216, 152, 1917 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][1] = { 3124, 3161, 1566 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][2] = { 2255, 3094, 3156 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][3] = { 2166, 3080, 1506 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][4] = { 2754, 1477, 3071 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][5] = { 2690, 1431, 1182 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][6] = { 1318, 1153, 3051 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 3780, 3793, 2984 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 3406, 3768, 3791 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 3359, 3763, 2939 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 3636, 2916, 3760 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 3609, 2880, 2661 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 2786, 2633, 3753 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][1] = { 2936, 2934, 992 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][2] = { 1159, 2890, 2916 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][3] = { 1150, 2885, 921 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][4] = { 2751, 766, 2837 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][5] = { 2747, 747, 650 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][6] = { 563, 570, 2812 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3055 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][1] = { 3052, 3051, 1237 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][2] = { 1397, 3011, 3034 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][3] = { 1389, 3006, 1168 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][4] = { 2884, 1016, 2962 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][1] = { 3029, 3028, 1255 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][2] = { 1406, 2988, 3011 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][3] = { 1398, 2983, 1190 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][4] = { 2860, 1050, 2939 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][5] = { 2857, 1033, 945 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][6] = { 866, 873, 2916 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][3] = { 1116, 2871, 885 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][4] = { 2736, 729, 2823 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][5] = { 2732, 710, 611 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][6] = { 523, 531, 2798 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][1] = { 2120, 2118, 305 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][2] = { 392, 2056, 2092 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][3] = { 387, 2049, 271 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][4] = { 1868, 206, 1983 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][5] = { 1863, 199, 163 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][6] = { 135, 137, 1950 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][1] = { 3172, 3170, 1505 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][2] = { 1657, 3135, 3155 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][3] = { 1649, 3130, 1439 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][4] = { 3021, 1294, 3091 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][5] = { 3018, 1276, 1184 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][6] = { 1100, 1107, 3071 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 3797, 3796, 2938 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 3049, 3783, 3791 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 3044, 3782, 2887 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 3741, 2765, 3768 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 3740, 2749, 2663 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 2580, 2587, 3760 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
};
#else
@@ -930,9 +1114,13 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_N
#include <stdlib.h>
static const double rec709_to_ntsc1953[3][3] = {
- { 0.6689794, 0.2678309, 0.0323187 },
- { 0.0184901, 1.0742442, -0.0602820 },
- { 0.0162259, 0.0431716, 0.8549253 }
+ /*
+ * This transform uses the Bradford method to compensate for
+ * the different whitepoints.
+ */
+ { 0.6785011, 0.2883441, 0.0331548 },
+ { 0.0165284, 1.0518725, -0.0684009 },
+ { 0.0179230, 0.0506096, 0.9314674 }
};
static const double rec709_to_ebu[3][3] = {
@@ -965,6 +1153,16 @@ static const double rec709_to_bt2020[3][3] = {
{ 0.0163976, 0.0880301, 0.8955723 },
};
+static const double rec709_to_dcip3[3][3] = {
+ /*
+ * This transform uses the Bradford method to compensate for
+ * the different whitepoints.
+ */
+ { 0.8686648, 0.1288456, 0.0024896 },
+ { 0.0345479, 0.9618084, 0.0036437 },
+ { 0.0167785, 0.0710559, 0.9121655 }
+};
+
static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
{
double ir, ig, ib;
@@ -1015,6 +1213,23 @@ static double transfer_rgb_to_adobergb(double v)
return pow(v, 1.0 / 2.19921875);
}
+static double transfer_rgb_to_dcip3(double v)
+{
+ return pow(v, 1.0 / 2.6);
+}
+
+static double transfer_rgb_to_smpte2084(double v)
+{
+ const double m1 = (2610.0 / 4096.0) / 4.0;
+ const double m2 = 128.0 * 2523.0 / 4096.0;
+ const double c1 = 3424.0 / 4096.0;
+ const double c2 = 32.0 * 2413.0 / 4096.0;
+ const double c3 = 32.0 * 2392.0 / 4096.0;
+
+ v = pow(v, m1);
+ return pow((c1 + c2 * v) / (1 + c3 * v), m2);
+}
+
static double transfer_srgb_to_rec709(double v)
{
return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v));
@@ -1049,6 +1264,9 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func,
case V4L2_COLORSPACE_BT2020:
mult_matrix(r, g, b, rec709_to_bt2020);
break;
+ case V4L2_COLORSPACE_DCI_P3:
+ mult_matrix(r, g, b, rec709_to_dcip3);
+ break;
case V4L2_COLORSPACE_SRGB:
case V4L2_COLORSPACE_REC709:
break;
@@ -1078,6 +1296,16 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func,
*g = transfer_rgb_to_adobergb(*g);
*b = transfer_rgb_to_adobergb(*b);
break;
+ case V4L2_XFER_FUNC_DCI_P3:
+ *r = transfer_rgb_to_dcip3(*r);
+ *g = transfer_rgb_to_dcip3(*g);
+ *b = transfer_rgb_to_dcip3(*b);
+ break;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ *r = transfer_rgb_to_smpte2084(*r);
+ *g = transfer_rgb_to_smpte2084(*g);
+ *b = transfer_rgb_to_smpte2084(*b);
+ break;
case V4L2_XFER_FUNC_SMPTE240M:
*r = transfer_rgb_to_smpte240m(*r);
*g = transfer_rgb_to_smpte240m(*g);
@@ -1102,6 +1330,8 @@ int main(int argc, char **argv)
V4L2_COLORSPACE_SRGB,
V4L2_COLORSPACE_ADOBERGB,
V4L2_COLORSPACE_BT2020,
+ 0,
+ V4L2_COLORSPACE_DCI_P3,
};
static const char * const colorspace_names[] = {
"",
@@ -1115,6 +1345,8 @@ int main(int argc, char **argv)
"V4L2_COLORSPACE_SRGB",
"V4L2_COLORSPACE_ADOBERGB",
"V4L2_COLORSPACE_BT2020",
+ "",
+ "V4L2_COLORSPACE_DCI_P3",
};
static const char * const xfer_func_names[] = {
"",
@@ -1123,6 +1355,8 @@ int main(int argc, char **argv)
"V4L2_XFER_FUNC_ADOBERGB",
"V4L2_XFER_FUNC_SMPTE240M",
"V4L2_XFER_FUNC_NONE",
+ "V4L2_XFER_FUNC_DCI_P3",
+ "V4L2_XFER_FUNC_SMPTE2084",
};
int i;
int x;
@@ -1153,9 +1387,9 @@ int main(int argc, char **argv)
printf("\n};\n\n");
printf("/* Generated table */\n");
- printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][V4L2_XFER_FUNC_NONE + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
- for (c = 0; c <= V4L2_COLORSPACE_BT2020; c++) {
- for (x = 1; x <= V4L2_XFER_FUNC_NONE; x++) {
+ printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
+ for (c = 0; c <= V4L2_COLORSPACE_DCI_P3; c++) {
+ for (x = 1; x <= V4L2_XFER_FUNC_SMPTE2084; x++) {
for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) {
double r, g, b;
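
The SMPTE 2084 (PQ) entries in the regenerated table come from the transfer_rgb_to_smpte2084() helper added to the generator above. A standalone check of that curve for a few linear input values, using the same constants as the patch:

#include <stdio.h>
#include <math.h>

/* Same constants as the generator's transfer_rgb_to_smpte2084() */
static double transfer_rgb_to_smpte2084(double v)
{
	const double m1 = (2610.0 / 4096.0) / 4.0;
	const double m2 = 128.0 * 2523.0 / 4096.0;
	const double c1 = 3424.0 / 4096.0;
	const double c2 = 32.0 * 2413.0 / 4096.0;
	const double c3 = 32.0 * 2392.0 / 4096.0;

	v = pow(v, m1);
	return pow((c1 + c2 * v) / (1 + c3 * v), m2);
}

int main(void)
{
	const double in[] = { 0.0, 0.1, 0.5, 0.9, 1.0 };

	/* PQ compresses highlights hard: 1.0 maps to 1.0, 0.1 already to ~0.75 */
	for (unsigned i = 0; i < sizeof(in) / sizeof(in[0]); i++)
		printf("linear %.2f -> PQ %.4f\n", in[i],
		       transfer_rgb_to_smpte2084(in[i]));
	return 0;
}
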
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.h b/drivers/media/platform/vivid/vivid-tpg-colors.h
index 86b8bf3fe745..4e5a76a1e25b 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.h
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.h
@@ -61,8 +61,8 @@ enum tpg_color {
extern const struct color tpg_colors[TPG_COLOR_MAX];
extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
-extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1]
- [V4L2_XFER_FUNC_NONE + 1]
+extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
+ [V4L2_XFER_FUNC_SMPTE2084 + 1]
[TPG_COLOR_CSC_BLACK + 1];
#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index 1458c7955547..14256141f905 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -193,6 +193,14 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
tpg->interleaved = true;
tpg->vdownsampling[1] = 1;
tpg->hdownsampling[1] = 1;
@@ -349,6 +357,17 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->twopixelsize[0] = 2;
tpg->twopixelsize[1] = 2;
break;
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SBGGR12:
+ tpg->twopixelsize[0] = 4;
+ tpg->twopixelsize[1] = 4;
+ break;
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
@@ -1112,6 +1131,70 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset] = odd ? g_u : r_y;
buf[1][offset] = odd ? b_v : g_u;
break;
+ case V4L2_PIX_FMT_SBGGR10:
+ buf[0][offset] = odd ? g_u << 2 : b_v << 2;
+ buf[0][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+ buf[1][offset] = odd ? r_y << 2 : g_u << 2;
+ buf[1][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+ buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+ buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+ break;
+ case V4L2_PIX_FMT_SGBRG10:
+ buf[0][offset] = odd ? b_v << 2 : g_u << 2;
+ buf[0][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+ buf[1][offset] = odd ? g_u << 2 : r_y << 2;
+ buf[1][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+ buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+ buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+ break;
+ case V4L2_PIX_FMT_SGRBG10:
+ buf[0][offset] = odd ? r_y << 2 : g_u << 2;
+ buf[0][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+ buf[1][offset] = odd ? g_u << 2 : b_v << 2;
+ buf[1][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+ buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+ buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+ break;
+ case V4L2_PIX_FMT_SRGGB10:
+ buf[0][offset] = odd ? g_u << 2 : r_y << 2;
+ buf[0][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+ buf[1][offset] = odd ? b_v << 2 : g_u << 2;
+ buf[1][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+ buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+ buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ buf[0][offset] = odd ? g_u << 4 : b_v << 4;
+ buf[0][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+ buf[1][offset] = odd ? r_y << 4 : g_u << 4;
+ buf[1][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+ buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+ buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+ break;
+ case V4L2_PIX_FMT_SGBRG12:
+ buf[0][offset] = odd ? b_v << 4 : g_u << 4;
+ buf[0][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+ buf[1][offset] = odd ? g_u << 4 : r_y << 4;
+ buf[1][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+ buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+ buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+ break;
+ case V4L2_PIX_FMT_SGRBG12:
+ buf[0][offset] = odd ? r_y << 4 : g_u << 4;
+ buf[0][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+ buf[1][offset] = odd ? g_u << 4 : b_v << 4;
+ buf[1][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+ buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+ buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+ break;
+ case V4L2_PIX_FMT_SRGGB12:
+ buf[0][offset] = odd ? g_u << 4 : r_y << 4;
+ buf[0][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+ buf[1][offset] = odd ? b_v << 4 : g_u << 4;
+ buf[1][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+ buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+ buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+ break;
}
}
@@ -1122,6 +1205,14 @@ unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
return buf_line & 1;
default:
return 0;
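
For the new 10-bit Bayer formats gen_twopix() places the 8-bit test-pattern sample in bits 9..2 of a little-endian 16-bit word and copies the sample's two least-significant bits into the padding bits, so 0xff expands to the full-scale code 0x3ff; the 12-bit variants do the same with a 4-bit shift. A standalone sketch of that packing:

#include <stdio.h>

int main(void)
{
	const unsigned char samples[] = { 0x00, 0x80, 0xab, 0xff };

	for (unsigned i = 0; i < sizeof(samples); i++) {
		unsigned char lo = samples[i] << 2;	/* byte at offset */
		unsigned char hi = samples[i] >> 6;	/* byte at offset + 1 */

		/* replicate the sample's low two bits into the padding */
		lo |= (lo >> 2) & 3;
		printf("8-bit 0x%02x -> 10-bit 0x%03x (bytes %02x %02x)\n",
		       samples[i], (hi << 8) | lo, lo, hi);
	}
	return 0;
}
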
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index ef81b01b53d2..e903d023e9df 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -94,36 +94,38 @@ static void vivid_g_fmt_vbi_cap(struct vivid_dev *dev, struct v4l2_vbi_format *v
void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
{
struct v4l2_vbi_format vbi;
- u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
vivid_g_fmt_vbi_cap(dev, &vbi);
- buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count;
+ buf->vb.sequence = dev->vbi_cap_seq_count;
if (dev->field_cap == V4L2_FIELD_ALTERNATE)
- buf->vb.v4l2_buf.sequence /= 2;
+ buf->vb.sequence /= 2;
- vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence);
+ vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
- memset(vbuf, 0x10, vb2_plane_size(&buf->vb, 0));
+ memset(vbuf, 0x10, vb2_plane_size(&buf->vb.vb2_buf, 0));
if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
}
-void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
{
- struct v4l2_sliced_vbi_data *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ struct v4l2_sliced_vbi_data *vbuf =
+ vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
- buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count;
+ buf->vb.sequence = dev->vbi_cap_seq_count;
if (dev->field_cap == V4L2_FIELD_ALTERNATE)
- buf->vb.v4l2_buf.sequence /= 2;
+ buf->vb.sequence /= 2;
- vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence);
+ vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
- memset(vbuf, 0, vb2_plane_size(&buf->vb, 0));
+ memset(vbuf, 0, vb2_plane_size(&buf->vb.vb2_buf, 0));
if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
unsigned i;
@@ -131,11 +133,11 @@ void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *bu
vbuf[i] = dev->vbi_gen.data[i];
}
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
}
-static int vbi_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int vbi_cap_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned *nbuffers, unsigned *nplanes,
unsigned sizes[], void *alloc_ctxs[])
{
@@ -187,8 +189,9 @@ static int vbi_cap_buf_prepare(struct vb2_buffer *vb)
static void vbi_cap_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -215,7 +218,8 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
}
return err;
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 4e4c70e1e04a..75c5709f938e 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -27,7 +27,7 @@
#include "vivid-vbi-out.h"
#include "vivid-vbi-cap.h"
-static int vbi_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int vbi_out_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned *nbuffers, unsigned *nplanes,
unsigned sizes[], void *alloc_ctxs[])
{
@@ -79,8 +79,9 @@ static int vbi_out_buf_prepare(struct vb2_buffer *vb)
static void vbi_out_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -107,7 +108,8 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
}
return err;
@@ -201,7 +203,8 @@ int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_forma
return 0;
}
-int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -217,10 +220,13 @@ int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format
return 0;
}
-void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
{
- struct v4l2_sliced_vbi_data *vbi = vb2_plane_vaddr(&buf->vb, 0);
- unsigned elems = vb2_get_plane_payload(&buf->vb, 0) / sizeof(*vbi);
+ struct v4l2_sliced_vbi_data *vbi =
+ vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ unsigned elems =
+ vb2_get_plane_payload(&buf->vb.vb2_buf, 0) / sizeof(*vbi);
dev->vbi_out_have_cc[0] = false;
dev->vbi_out_have_cc[1] = false;
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index ed0b8788a66f..ef5412311b2f 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -95,10 +95,11 @@ static const struct v4l2_discrete_probe webcam_probe = {
VIVID_WEBCAM_SIZES
};
-static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int vid_cap_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned *nbuffers, unsigned *nplanes,
unsigned sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct vivid_dev *dev = vb2_get_drv_priv(vq);
unsigned buffers = tpg_g_buffers(&dev->tpg);
unsigned h = dev->fmt_cap_rect.height;
@@ -198,7 +199,7 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
}
vb2_set_plane_payload(vb, p, size);
- vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p];
+ vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
}
return 0;
@@ -206,10 +207,11 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct v4l2_timecode *tc = &vb->v4l2_buf.timecode;
+ struct v4l2_timecode *tc = &vbuf->timecode;
unsigned fps = 25;
- unsigned seq = vb->v4l2_buf.sequence;
+ unsigned seq = vbuf->sequence;
if (!vivid_is_sdtv_cap(dev))
return;
@@ -218,7 +220,7 @@ static void vid_cap_buf_finish(struct vb2_buffer *vb)
* Set the timecode. Rarely used, so it is interesting to
* test this.
*/
- vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
+ vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
if (dev->std_cap & V4L2_STD_525_60)
fps = 30;
tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
@@ -231,8 +233,9 @@ static void vid_cap_buf_finish(struct vb2_buffer *vb)
static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -268,7 +271,8 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
}
return err;
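The queue_setup prototype changes the same way in every driver here: the struct v4l2_format pointer becomes an opaque const void *parg, and drivers that still need the format, as vid_cap_queue_setup() above does, cast it back locally. A hedged sketch of the resulting callback shape; the example_* names and the CREATE_BUFS size check are only assumed to match the common driver pattern, not quoted from any file in this series.

/* placeholder for the size of the driver's currently configured format */
#define EXAMPLE_IMAGE_SIZE (720 * 576 * 2)

static int example_queue_setup(struct vb2_queue *vq, const void *parg,
			       unsigned int *nbuffers, unsigned int *nplanes,
			       unsigned int sizes[], void *alloc_ctxs[])
{
	/* parg points to a struct v4l2_format for VIDIOC_CREATE_BUFS, else NULL */
	const struct v4l2_format *fmt = parg;
	unsigned long size = EXAMPLE_IMAGE_SIZE;

	if (fmt) {
		if (fmt->fmt.pix.sizeimage < size)
			return -EINVAL;
		size = fmt->fmt.pix.sizeimage;
	}

	*nplanes = 1;
	sizes[0] = size;
	return 0;
}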
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index fc73927a4abc..1678b730dba2 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -390,6 +390,62 @@ struct vivid_fmt vivid_formats[] = {
.buffers = 1,
},
{
+ .fourcc = V4L2_PIX_FMT_SBGGR10, /* Bayer BG/GR */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10, /* Bayer GB/RG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10, /* Bayer GR/BG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10, /* Bayer RG/GB */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12, /* Bayer BG/GR */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12, /* Bayer GB/RG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12, /* Bayer GR/BG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12, /* Bayer RG/GB */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_NV16M,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index c404e275eae0..b77acb6a7013 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -31,10 +31,11 @@
#include "vivid-kthread-out.h"
#include "vivid-vid-out.h"
-static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int vid_out_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned *nbuffers, unsigned *nplanes,
unsigned sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct vivid_dev *dev = vb2_get_drv_priv(vq);
const struct vivid_fmt *vfmt = dev->fmt_out;
unsigned planes = vfmt->buffers;
@@ -109,6 +110,7 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
static int vid_out_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size;
unsigned planes;
@@ -131,14 +133,14 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
}
if (dev->field_out != V4L2_FIELD_ALTERNATE)
- vb->v4l2_buf.field = dev->field_out;
- else if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
- vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+ vbuf->field = dev->field_out;
+ else if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
return -EINVAL;
for (p = 0; p < planes; p++) {
size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
- vb->v4l2_planes[p].data_offset;
+ vb->planes[p].data_offset;
if (vb2_get_plane_payload(vb, p) < size) {
dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
@@ -152,8 +154,9 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
static void vid_out_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -186,7 +189,8 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
}
return err;
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 3294529a3108..cd5248a9a271 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -200,10 +200,10 @@ static void rpf_vdev_queue(struct vsp1_video *video,
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
buf->addr[0] + rpf->offsets[0]);
- if (buf->buf.num_planes > 1)
+ if (buf->buf.vb2_buf.num_planes > 1)
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
buf->addr[1] + rpf->offsets[1]);
- if (buf->buf.num_planes > 2)
+ if (buf->buf.vb2_buf.num_planes > 2)
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
buf->addr[2] + rpf->offsets[1]);
}
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 3c124c14ce14..5ce88e1f5d71 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -24,7 +24,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "vsp1.h"
@@ -610,11 +610,11 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
spin_unlock_irqrestore(&video->irqlock, flags);
- done->buf.v4l2_buf.sequence = video->sequence++;
- v4l2_get_timestamp(&done->buf.v4l2_buf.timestamp);
- for (i = 0; i < done->buf.num_planes; ++i)
- vb2_set_plane_payload(&done->buf, i, done->length[i]);
- vb2_buffer_done(&done->buf, VB2_BUF_STATE_DONE);
+ done->buf.sequence = video->sequence++;
+ v4l2_get_timestamp(&done->buf.timestamp);
+ for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
+ vb2_set_plane_payload(&done->buf.vb2_buf, i, done->length[i]);
+ vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
return next;
}
@@ -787,10 +787,11 @@ void vsp1_pipelines_resume(struct vsp1_device *vsp1)
*/
static int
-vsp1_video_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+vsp1_video_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct vsp1_video *video = vb2_get_drv_priv(vq);
const struct v4l2_pix_format_mplane *format;
struct v4l2_pix_format_mplane pix_mp;
@@ -820,8 +821,9 @@ vsp1_video_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
- struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb);
+ struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
const struct v4l2_pix_format_mplane *format = &video->format;
unsigned int i;
@@ -841,9 +843,10 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
- struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb);
+ struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
unsigned long flags;
bool empty;
@@ -954,7 +957,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
/* Remove all buffers from the IRQ queue. */
spin_lock_irqsave(&video->irqlock, flags);
list_for_each_entry(buffer, &video->irqqueue, queue)
- vb2_buffer_done(&buffer->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
INIT_LIST_HEAD(&video->irqqueue);
spin_unlock_irqrestore(&video->irqlock, flags);
}
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index 0887a4d2742c..a929aa81cdbf 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -18,7 +18,7 @@
#include <linux/wait.h>
#include <media/media-entity.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
struct vsp1_video;
@@ -94,7 +94,7 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
}
struct vsp1_video_buffer {
- struct vb2_buffer buf;
+ struct vb2_v4l2_buffer buf;
struct list_head queue;
dma_addr_t addr[3];
@@ -102,9 +102,9 @@ struct vsp1_video_buffer {
};
static inline struct vsp1_video_buffer *
-to_vsp1_video_buffer(struct vb2_buffer *vb)
+to_vsp1_video_buffer(struct vb2_v4l2_buffer *vbuf)
{
- return container_of(vb, struct vsp1_video_buffer, buf);
+ return container_of(vbuf, struct vsp1_video_buffer, buf);
}
struct vsp1_video_operations {
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 1d2b3a2f1573..95b62f4f77e7 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -201,9 +201,9 @@ static void wpf_vdev_queue(struct vsp1_video *video,
struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video);
vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]);
- if (buf->buf.num_planes > 1)
+ if (buf->buf.vb2_buf.num_planes > 1)
vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]);
- if (buf->buf.num_planes > 2)
+ if (buf->buf.vb2_buf.num_planes > 2)
vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]);
}
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index e779c93cb015..d11cc7072cd5 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -22,7 +22,7 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "xilinx-dma.h"
@@ -285,7 +285,7 @@ done:
* @dma: DMA channel that uses the buffer
*/
struct xvip_dma_buffer {
- struct vb2_buffer buf;
+ struct vb2_v4l2_buffer buf;
struct list_head queue;
struct xvip_dma *dma;
};
@@ -301,18 +301,19 @@ static void xvip_dma_complete(void *param)
list_del(&buf->queue);
spin_unlock(&dma->queued_lock);
- buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
- buf->buf.v4l2_buf.sequence = dma->sequence++;
- v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
- vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage);
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+ buf->buf.field = V4L2_FIELD_NONE;
+ buf->buf.sequence = dma->sequence++;
+ v4l2_get_timestamp(&buf->buf.timestamp);
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
static int
-xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+xvip_dma_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct xvip_dma *dma = vb2_get_drv_priv(vq);
/* Make sure the image size is large enough. */
@@ -329,8 +330,9 @@ xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
- struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
buf->dma = dma;
@@ -339,8 +341,9 @@ static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
- struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
struct dma_async_tx_descriptor *desc;
dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
u32 flags;
@@ -367,7 +370,7 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
if (!desc) {
dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
return;
}
desc->callback = xvip_dma_complete;
@@ -434,7 +437,7 @@ error:
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
list_del(&buf->queue);
}
spin_unlock_irq(&dma->queued_lock);
@@ -461,7 +464,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&buf->queue);
}
spin_unlock_irq(&dma->queued_lock);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index a540111f8d3d..7a1621a2ef40 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -22,7 +22,7 @@
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
struct dma_chan;
struct xvip_composite_device;
diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c
index b533240f8ec0..3a12ef35682b 100644
--- a/drivers/media/tuners/msi001.c
+++ b/drivers/media/tuners/msi001.c
@@ -513,7 +513,6 @@ MODULE_DEVICE_TABLE(spi, msi001_id_table);
static struct spi_driver msi001_driver = {
.driver = {
.name = "msi001",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = msi001_probe,
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 8f2e1c277c5f..fcbb49757614 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -21,6 +21,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
/* AirSpy USB API commands (from AirSpy Library) */
@@ -97,7 +98,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
/* intermediate buffers with raw data from the USB device */
struct airspy_frame_buf {
- struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -310,13 +312,13 @@ static void airspy_urb_complete(struct urb *urb)
}
/* fill framebuffer */
- ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
len = airspy_convert_stream(s, ptr, urb->transfer_buffer,
urb->actual_length);
- vb2_set_plane_payload(&fbuf->vb, 0, len);
- v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
- fbuf->vb.v4l2_buf.sequence = s->sequence++;
- vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
+ v4l2_get_timestamp(&fbuf->vb.timestamp);
+ fbuf->vb.sequence = s->sequence++;
+ vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
skip:
usb_submit_urb(urb, GFP_ATOMIC);
@@ -459,7 +461,7 @@ static void airspy_cleanup_queued_bufs(struct airspy *s)
buf = list_entry(s->queued_bufs.next,
struct airspy_frame_buf, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
}
@@ -486,7 +488,7 @@ static void airspy_disconnect(struct usb_interface *intf)
/* Videobuf2 operations */
static int airspy_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt, unsigned int *nbuffers,
+ const void *parg, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
struct airspy *s = vb2_get_drv_priv(vq);
@@ -505,14 +507,15 @@ static int airspy_queue_setup(struct vb2_queue *vq,
static void airspy_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct airspy *s = vb2_get_drv_priv(vb->vb2_queue);
struct airspy_frame_buf *buf =
- container_of(vb, struct airspy_frame_buf, vb);
+ container_of(vbuf, struct airspy_frame_buf, vb);
unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (unlikely(!s->udev)) {
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
return;
}
@@ -571,7 +574,8 @@ err_clear_bit:
list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
}
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index f67247cf1a5a..130c8b49bf7f 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -30,10 +30,11 @@
/* ------------------------------------------------------------------ */
-static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int vbi_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct au0828_dev *dev = vb2_get_drv_priv(vq);
unsigned long img_size = dev->vbi_width * dev->vbi_height * 2;
unsigned long size;
@@ -52,7 +53,6 @@ static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int vbi_buffer_prepare(struct vb2_buffer *vb)
{
struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
unsigned long size;
size = dev->vbi_width * dev->vbi_height * 2;
@@ -62,7 +62,7 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
__func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(vb, 0, size);
return 0;
}
@@ -71,7 +71,9 @@ static void
vbi_buffer_queue(struct vb2_buffer *vb)
{
struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct au0828_buffer *buf =
+ container_of(vbuf, struct au0828_buffer, vb);
struct au0828_dmaqueue *vbiq = &dev->vbiq;
unsigned long flags = 0;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 1a362a041ab3..45c622e234f7 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -302,20 +302,20 @@ static inline void buffer_filled(struct au0828_dev *dev,
struct au0828_dmaqueue *dma_q,
struct au0828_buffer *buf)
{
- struct vb2_buffer *vb = &buf->vb;
- struct vb2_queue *q = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vb = &buf->vb;
+ struct vb2_queue *q = vb->vb2_buf.vb2_queue;
/* Advice that buffer was filled */
au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- vb->v4l2_buf.sequence = dev->frame_count++;
+ vb->sequence = dev->frame_count++;
else
- vb->v4l2_buf.sequence = dev->vbi_frame_count++;
+ vb->sequence = dev->vbi_frame_count++;
- vb->v4l2_buf.field = V4L2_FIELD_INTERLACED;
- v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ vb->field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&vb->timestamp);
+ vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
}
/*
@@ -531,11 +531,11 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
buf = dev->isoc_ctl.buf;
if (buf != NULL)
- outp = vb2_plane_vaddr(&buf->vb, 0);
+ outp = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
vbi_buf = dev->isoc_ctl.vbi_buf;
if (vbi_buf != NULL)
- vbioutp = vb2_plane_vaddr(&vbi_buf->vb, 0);
+ vbioutp = vb2_plane_vaddr(&vbi_buf->vb.vb2_buf, 0);
for (i = 0; i < urb->number_of_packets; i++) {
int status = urb->iso_frame_desc[i].status;
@@ -574,7 +574,7 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
vbioutp = NULL;
else
vbioutp = vb2_plane_vaddr(
- &vbi_buf->vb, 0);
+ &vbi_buf->vb.vb2_buf, 0);
/* Video */
if (buf != NULL)
@@ -583,7 +583,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (buf == NULL)
outp = NULL;
else
- outp = vb2_plane_vaddr(&buf->vb, 0);
+ outp = vb2_plane_vaddr(
+ &buf->vb.vb2_buf, 0);
/* As long as isoc traffic is arriving, keep
resetting the timer */
@@ -637,10 +638,11 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
return rc;
}
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct au0828_dev *dev = vb2_get_drv_priv(vq);
unsigned long img_size = dev->height * dev->bytesperline;
unsigned long size;
@@ -658,7 +660,9 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int
buffer_prepare(struct vb2_buffer *vb)
{
- struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct au0828_buffer *buf = container_of(vbuf,
+ struct au0828_buffer, vb);
struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
buf->length = dev->height * dev->bytesperline;
@@ -668,14 +672,15 @@ buffer_prepare(struct vb2_buffer *vb)
__func__, vb2_plane_size(vb, 0), buf->length);
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, buf->length);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->length);
return 0;
}
static void
buffer_queue(struct vb2_buffer *vb)
{
- struct au0828_buffer *buf = container_of(vb,
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct au0828_buffer *buf = container_of(vbuf,
struct au0828_buffer,
vb);
struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
@@ -826,14 +831,15 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
spin_lock_irqsave(&dev->slock, flags);
if (dev->isoc_ctl.buf != NULL) {
- vb2_buffer_done(&dev->isoc_ctl.buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&dev->isoc_ctl.buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
dev->isoc_ctl.buf = NULL;
}
while (!list_empty(&vidq->active)) {
struct au0828_buffer *buf;
buf = list_entry(vidq->active.next, struct au0828_buffer, list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&buf->list);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -853,7 +859,7 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
spin_lock_irqsave(&dev->slock, flags);
if (dev->isoc_ctl.vbi_buf != NULL) {
- vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb,
+ vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
dev->isoc_ctl.vbi_buf = NULL;
}
@@ -862,7 +868,7 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
buf = list_entry(vbiq->active.next, struct au0828_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -911,7 +917,7 @@ static void au0828_vid_buffer_timeout(unsigned long data)
buf = dev->isoc_ctl.buf;
if (buf != NULL) {
- vid_data = vb2_plane_vaddr(&buf->vb, 0);
+ vid_data = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
memset(vid_data, 0x00, buf->length); /* Blank green frame */
buffer_filled(dev, dma_q, buf);
}
@@ -935,7 +941,7 @@ static void au0828_vbi_buffer_timeout(unsigned long data)
buf = dev->isoc_ctl.vbi_buf;
if (buf != NULL) {
- vbi_data = vb2_plane_vaddr(&buf->vb, 0);
+ vbi_data = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
memset(vbi_data, 0x00, buf->length);
buffer_filled(dev, dma_q, buf);
}
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 3b480005ce3b..60b59391ea2a 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -28,6 +28,7 @@
/* Analog */
#include <linux/videodev2.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -167,7 +168,7 @@ struct au0828_usb_isoc_ctl {
/* buffer for one video frame */
struct au0828_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
void *mem;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 9798160698a3..d0d8f08e37c8 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1114,7 +1114,8 @@ int cx231xx_enum_input(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
u32 gen_stat;
- unsigned int ret, n;
+ unsigned int n;
+ int ret;
n = i->index;
if (n >= MAX_CX231XX_INPUT)
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 0376c092bab8..1dd962535f97 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -847,6 +847,10 @@ static const struct usb_device_id dvbsky_id_table[] = {
USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
&dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
RC_MAP_TT_1500) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_H7_3,
+ &dvbsky_t680c_props, "Terratec H7 Rev.4",
+ RC_MAP_TT_1500) },
{ }
};
MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 197a4f2e54d2..5a503a6bb8c5 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1896,6 +1896,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl28xxu_props, "MSI DIGIVOX Micro HD", NULL) },
{ DVB_USB_DEVICE(USB_VID_COMPRO, 0x0620,
&rtl28xxu_props, "Compro VideoMate U620F", NULL) },
+ { DVB_USB_DEVICE(USB_VID_COMPRO, 0x0650,
+ &rtl28xxu_props, "Compro VideoMate U650F", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd394,
&rtl28xxu_props, "MaxMedia HU394-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a03,
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 744e7ed743e1..e23c285b3108 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -31,10 +31,11 @@
/* ------------------------------------------------------------------ */
-static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int vbi_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct em28xx *dev = vb2_get_drv_priv(vq);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
unsigned long size;
@@ -61,7 +62,6 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
{
struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
- struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
unsigned long size;
size = v4l2->vbi_width * v4l2->vbi_height * 2;
@@ -71,7 +71,7 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
__func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(vb, 0, size);
return 0;
}
@@ -79,8 +79,10 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
static void
vbi_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+ struct em28xx_buffer *buf =
+ container_of(vbuf, struct em28xx_buffer, vb);
struct em28xx_dmaqueue *vbiq = &dev->vbiq;
unsigned long flags = 0;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 4397ce5e78df..6a3cf342e087 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -433,14 +433,14 @@ static inline void finish_buffer(struct em28xx *dev,
{
em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
- buf->vb.v4l2_buf.sequence = dev->v4l2->field_count++;
+ buf->vb.sequence = dev->v4l2->field_count++;
if (dev->v4l2->progressive)
- buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
+ buf->vb.field = V4L2_FIELD_NONE;
else
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&buf->vb.timestamp);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
/*
@@ -871,10 +871,11 @@ static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type)
Videobuf2 operations
------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct em28xx *dev = vb2_get_drv_priv(vq);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
unsigned long size;
@@ -900,12 +901,12 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int
buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
- struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
unsigned long size;
- em28xx_videodbg("%s, field=%d\n", __func__, vb->v4l2_buf.field);
+ em28xx_videodbg("%s, field=%d\n", __func__, vbuf->field);
size = (v4l2->width * v4l2->height * v4l2->format->depth + 7) >> 3;
@@ -914,7 +915,7 @@ buffer_prepare(struct vb2_buffer *vb)
__func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(vb, 0, size);
return 0;
}
@@ -924,6 +925,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
struct em28xx *dev = vb2_get_drv_priv(vq);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
struct v4l2_frequency f;
+ struct v4l2_fh *owner;
int rc = 0;
em28xx_videodbg("%s\n", __func__);
@@ -964,7 +966,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
/* Ask tuner to go to analog or radio mode */
memset(&f, 0, sizeof(f));
f.frequency = v4l2->frequency;
- if (vq->owner && vq->owner->vdev->vfl_type == VFL_TYPE_RADIO)
+ owner = (struct v4l2_fh *)vq->owner;
+ if (owner && owner->vdev->vfl_type == VFL_TYPE_RADIO)
f.type = V4L2_TUNER_RADIO;
else
f.type = V4L2_TUNER_ANALOG_TV;
@@ -995,7 +998,8 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
spin_lock_irqsave(&dev->slock, flags);
if (dev->usb_ctl.vid_buf != NULL) {
- vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&dev->usb_ctl.vid_buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
dev->usb_ctl.vid_buf = NULL;
}
while (!list_empty(&vidq->active)) {
@@ -1003,7 +1007,7 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1026,7 +1030,8 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
spin_lock_irqsave(&dev->slock, flags);
if (dev->usb_ctl.vbi_buf != NULL) {
- vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
dev->usb_ctl.vbi_buf = NULL;
}
while (!list_empty(&vbiq->active)) {
@@ -1034,7 +1039,7 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1042,8 +1047,10 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
static void
buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+ struct em28xx_buffer *buf =
+ container_of(vbuf, struct em28xx_buffer, vb);
struct em28xx_dmaqueue *vidq = &dev->vidq;
unsigned long flags = 0;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index e6559c6f143c..76bf8ba372b3 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -35,6 +35,7 @@
#include <linux/kref.h>
#include <linux/videodev2.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -264,7 +265,7 @@ struct em28xx_fmt {
/* buffer for one video frame */
struct em28xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
void *mem;
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 0ab81ec8897a..ae1cfa792c58 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -386,10 +386,10 @@ start_error:
*/
static inline void store_byte(struct go7007_buffer *vb, u8 byte)
{
- if (vb && vb->vb.v4l2_planes[0].bytesused < GO7007_BUF_SIZE) {
- u8 *ptr = vb2_plane_vaddr(&vb->vb, 0);
+ if (vb && vb->vb.vb2_buf.planes[0].bytesused < GO7007_BUF_SIZE) {
+ u8 *ptr = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
- ptr[vb->vb.v4l2_planes[0].bytesused++] = byte;
+ ptr[vb->vb.vb2_buf.planes[0].bytesused++] = byte;
}
}
@@ -401,7 +401,7 @@ static void go7007_set_motion_regions(struct go7007 *go, struct go7007_buffer *v
.type = V4L2_EVENT_MOTION_DET,
.u.motion_det = {
.flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
- .frame_sequence = vb->vb.v4l2_buf.sequence,
+ .frame_sequence = vb->vb.sequence,
.region_mask = motion_regions,
},
};
@@ -417,7 +417,7 @@ static void go7007_set_motion_regions(struct go7007 *go, struct go7007_buffer *v
*/
static void go7007_motion_regions(struct go7007 *go, struct go7007_buffer *vb)
{
- u32 *bytesused = &vb->vb.v4l2_planes[0].bytesused;
+ u32 *bytesused = &vb->vb.vb2_buf.planes[0].bytesused;
unsigned motion[4] = { 0, 0, 0, 0 };
u32 motion_regions = 0;
unsigned stride = (go->width + 7) >> 3;
@@ -458,25 +458,26 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
go->next_seq++;
return vb;
}
- bytesused = &vb->vb.v4l2_planes[0].bytesused;
+ bytesused = &vb->vb.vb2_buf.planes[0].bytesused;
- vb->vb.v4l2_buf.sequence = go->next_seq++;
+ vb->vb.sequence = go->next_seq++;
if (vb->modet_active && *bytesused + 216 < GO7007_BUF_SIZE)
go7007_motion_regions(go, vb);
else
go7007_set_motion_regions(go, vb, 0);
- v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->vb.timestamp);
vb_tmp = vb;
spin_lock(&go->spinlock);
list_del(&vb->list);
if (list_empty(&go->vidq_active))
vb = NULL;
else
- vb = list_first_entry(&go->vidq_active, struct go7007_buffer, list);
+ vb = list_first_entry(&go->vidq_active,
+ struct go7007_buffer, list);
go->active_buf = vb;
spin_unlock(&go->spinlock);
- vb2_buffer_done(&vb_tmp->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&vb_tmp->vb.vb2_buf, VB2_BUF_STATE_DONE);
return vb;
}
@@ -519,9 +520,10 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
}
for (i = 0; i < length; ++i) {
- if (vb && vb->vb.v4l2_planes[0].bytesused >= GO7007_BUF_SIZE - 3) {
+ if (vb && vb->vb.vb2_buf.planes[0].bytesused >=
+ GO7007_BUF_SIZE - 3) {
v4l2_info(&go->v4l2_dev, "dropping oversized frame\n");
- vb->vb.v4l2_planes[0].bytesused = 0;
+ vb->vb.vb2_buf.planes[0].bytesused = 0;
vb->frame_offset = 0;
vb->modet_active = 0;
vb = go->active_buf = NULL;
@@ -601,7 +603,8 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
vb = frame_boundary(go, vb);
go->seen_frame = buf[i] == frame_start_code;
if (vb && go->seen_frame)
- vb->frame_offset = vb->vb.v4l2_planes[0].bytesused;
+ vb->frame_offset =
+ vb->vb.vb2_buf.planes[0].bytesused;
}
/* Handle any special chunk types, or just write the
* start code to the (potentially new) buffer */
diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
index 5f4c9b9e899a..60bf5f0644d1 100644
--- a/drivers/media/usb/go7007/go7007-fw.c
+++ b/drivers/media/usb/go7007/go7007-fw.c
@@ -379,7 +379,7 @@ static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space)
buf = kzalloc(4096, GFP_KERNEL);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
for (i = 1; i < 32; ++i) {
mjpeg_frame_header(go, buf + size, i);
@@ -646,7 +646,7 @@ static int gen_mpeg1hdr_to_package(struct go7007 *go,
buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME);
if (go->interlace_coding)
@@ -832,7 +832,7 @@ static int gen_mpeg4hdr_to_package(struct go7007 *go,
buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME);
i = 368;
diff --git a/drivers/media/usb/go7007/go7007-priv.h b/drivers/media/usb/go7007/go7007-priv.h
index 2251c3f99d1d..745185eb060b 100644
--- a/drivers/media/usb/go7007/go7007-priv.h
+++ b/drivers/media/usb/go7007/go7007-priv.h
@@ -20,7 +20,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
struct go7007;
@@ -136,7 +136,7 @@ struct go7007_hpi_ops {
#define GO7007_BUF_SIZE (GO7007_BUF_PAGES << PAGE_SHIFT)
struct go7007_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
unsigned int frame_offset;
u32 modet_active;
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index c57207e268c3..f3d187db9368 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -52,7 +52,7 @@ static bool valid_pixelformat(u32 pixelformat)
static u32 get_frame_type_flag(struct go7007_buffer *vb, int format)
{
- u8 *ptr = vb2_plane_vaddr(&vb->vb, 0);
+ u8 *ptr = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
switch (format) {
case V4L2_PIX_FMT_MJPEG:
@@ -369,7 +369,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
static int go7007_queue_setup(struct vb2_queue *q,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -386,8 +386,9 @@ static void go7007_buf_queue(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct go7007 *go = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct go7007_buffer *go7007_vb =
- container_of(vb, struct go7007_buffer, vb);
+ container_of(vbuf, struct go7007_buffer, vb);
unsigned long flags;
spin_lock_irqsave(&go->spinlock, flags);
@@ -397,12 +398,13 @@ static void go7007_buf_queue(struct vb2_buffer *vb)
static int go7007_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct go7007_buffer *go7007_vb =
- container_of(vb, struct go7007_buffer, vb);
+ container_of(vbuf, struct go7007_buffer, vb);
go7007_vb->modet_active = 0;
go7007_vb->frame_offset = 0;
- vb->v4l2_planes[0].bytesused = 0;
+ vb->planes[0].bytesused = 0;
return 0;
}
@@ -410,15 +412,15 @@ static void go7007_buf_finish(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct go7007 *go = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct go7007_buffer *go7007_vb =
- container_of(vb, struct go7007_buffer, vb);
+ container_of(vbuf, struct go7007_buffer, vb);
u32 frame_type_flag = get_frame_type_flag(go7007_vb, go->format);
- struct v4l2_buffer *buf = &vb->v4l2_buf;
- buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_BFRAME |
+ vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_BFRAME |
V4L2_BUF_FLAG_PFRAME);
- buf->flags |= frame_type_flag;
- buf->field = V4L2_FIELD_NONE;
+ vbuf->flags |= frame_type_flag;
+ vbuf->field = V4L2_FIELD_NONE;
}
static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index e54cee856a80..af5cd8213e8b 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -436,7 +436,7 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
}
j = gspca_dev->fr_queue[i];
frame = &gspca_dev->frame[j];
- frame->v4l2_buf.timestamp = ktime_to_timeval(ktime_get());
+ v4l2_get_timestamp(&frame->v4l2_buf.timestamp);
frame->v4l2_buf.sequence = gspca_dev->sequence++;
gspca_dev->image = frame->data;
gspca_dev->image_len = 0;
@@ -1909,7 +1909,7 @@ static ssize_t dev_read(struct file *file, char __user *data,
}
/* get a frame */
- timestamp = ktime_to_timeval(ktime_get());
+ v4l2_get_timestamp(&timestamp);
timestamp.tv_sec--;
n = 2;
for (;;) {
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index fd1fa412e094..e05bfec90f46 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -21,6 +21,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
/* HackRF USB API commands (from HackRF Library) */
@@ -31,8 +32,10 @@ enum {
CMD_BOARD_ID_READ = 0x0e,
CMD_VERSION_STRING_READ = 0x0f,
CMD_SET_FREQ = 0x10,
+ CMD_AMP_ENABLE = 0x11,
CMD_SET_LNA_GAIN = 0x13,
CMD_SET_VGA_GAIN = 0x14,
+ CMD_SET_TXVGA_GAIN = 0x15,
};
/*
@@ -43,10 +46,10 @@ enum {
#define MAX_BULK_BUFS (6)
#define BULK_BUFFER_SIZE (128 * 512)
-static const struct v4l2_frequency_band bands_adc[] = {
+static const struct v4l2_frequency_band bands_adc_dac[] = {
{
.tuner = 0,
- .type = V4L2_TUNER_ADC,
+ .type = V4L2_TUNER_SDR,
.index = 0,
.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 200000,
@@ -54,7 +57,7 @@ static const struct v4l2_frequency_band bands_adc[] = {
},
};
-static const struct v4l2_frequency_band bands_rf[] = {
+static const struct v4l2_frequency_band bands_rx_tx[] = {
{
.tuner = 1,
.type = V4L2_TUNER_RF,
@@ -67,7 +70,6 @@ static const struct v4l2_frequency_band bands_rf[] = {
/* stream formats */
struct hackrf_format {
- char *name;
u32 pixelformat;
u32 buffersize;
};
@@ -75,7 +77,6 @@ struct hackrf_format {
/* format descriptions for capture and preview */
static struct hackrf_format formats[] = {
{
- .name = "Complex S8",
.pixelformat = V4L2_SDR_FMT_CS8,
.buffersize = BULK_BUFFER_SIZE,
},
@@ -84,28 +85,44 @@ static struct hackrf_format formats[] = {
static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
/* intermediate buffers with raw data from the USB device */
-struct hackrf_frame_buf {
- struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+struct hackrf_buffer {
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
struct hackrf_dev {
-#define POWER_ON (1 << 1)
-#define URB_BUF (1 << 2)
-#define USB_STATE_URB_BUF (1 << 3)
+#define USB_STATE_URB_BUF 1 /* XXX: set manually */
+#define RX_ON 4
+#define TX_ON 5
+#define RX_ADC_FREQUENCY 11
+#define TX_DAC_FREQUENCY 12
+#define RX_BANDWIDTH 13
+#define TX_BANDWIDTH 14
+#define RX_RF_FREQUENCY 15
+#define TX_RF_FREQUENCY 16
+#define RX_RF_GAIN 17
+#define TX_RF_GAIN 18
+#define RX_IF_GAIN 19
+#define RX_LNA_GAIN 20
+#define TX_LNA_GAIN 21
unsigned long flags;
+ struct usb_interface *intf;
struct device *dev;
struct usb_device *udev;
- struct video_device vdev;
+ struct video_device rx_vdev;
+ struct video_device tx_vdev;
struct v4l2_device v4l2_dev;
/* videobuf2 queue and queued buffers list */
- struct vb2_queue vb_queue;
- struct list_head queued_bufs;
- spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ struct vb2_queue rx_vb2_queue;
+ struct vb2_queue tx_vb2_queue;
+ struct list_head rx_buffer_list;
+ struct list_head tx_buffer_list;
+ spinlock_t buffer_list_lock; /* Protects buffer_list */
unsigned sequence; /* Buffer sequence counter */
unsigned int vb_full; /* vb is full and packets dropped */
+ unsigned int vb_empty; /* vb is empty and packets dropped */
/* Note if taking both locks v4l2_lock must always be locked first! */
struct mutex v4l2_lock; /* Protects everything else */
@@ -125,16 +142,24 @@ struct hackrf_dev {
/* Current configuration */
unsigned int f_adc;
- unsigned int f_rf;
+ unsigned int f_dac;
+ unsigned int f_rx;
+ unsigned int f_tx;
u32 pixelformat;
u32 buffersize;
/* Controls */
- struct v4l2_ctrl_handler hdl;
- struct v4l2_ctrl *bandwidth_auto;
- struct v4l2_ctrl *bandwidth;
- struct v4l2_ctrl *lna_gain;
- struct v4l2_ctrl *if_gain;
+ struct v4l2_ctrl_handler rx_ctrl_handler;
+ struct v4l2_ctrl *rx_bandwidth_auto;
+ struct v4l2_ctrl *rx_bandwidth;
+ struct v4l2_ctrl *rx_rf_gain;
+ struct v4l2_ctrl *rx_lna_gain;
+ struct v4l2_ctrl *rx_if_gain;
+ struct v4l2_ctrl_handler tx_ctrl_handler;
+ struct v4l2_ctrl *tx_bandwidth_auto;
+ struct v4l2_ctrl *tx_bandwidth;
+ struct v4l2_ctrl *tx_rf_gain;
+ struct v4l2_ctrl *tx_lna_gain;
/* Sample rate calc */
unsigned long jiffies_next;
@@ -164,6 +189,7 @@ static int hackrf_ctrl_msg(struct hackrf_dev *dev, u8 request, u16 value,
switch (request) {
case CMD_SET_TRANSCEIVER_MODE:
case CMD_SET_FREQ:
+ case CMD_AMP_ENABLE:
case CMD_SAMPLE_RATE_SET:
case CMD_BASEBAND_FILTER_BANDWIDTH_SET:
pipe = usb_sndctrlpipe(dev->udev, 0);
@@ -173,6 +199,7 @@ static int hackrf_ctrl_msg(struct hackrf_dev *dev, u8 request, u16 value,
case CMD_VERSION_STRING_READ:
case CMD_SET_LNA_GAIN:
case CMD_SET_VGA_GAIN:
+ case CMD_SET_TXVGA_GAIN:
pipe = usb_rcvctrlpipe(dev->udev, 0);
requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
break;
@@ -205,25 +232,227 @@ err:
return ret;
}
+static int hackrf_set_params(struct hackrf_dev *dev)
+{
+ struct usb_interface *intf = dev->intf;
+ int ret, i;
+ u8 buf[8], u8tmp;
+ unsigned int uitmp, uitmp1, uitmp2;
+ const bool rx = test_bit(RX_ON, &dev->flags);
+ const bool tx = test_bit(TX_ON, &dev->flags);
+ static const struct {
+ u32 freq;
+ } bandwidth_lut[] = {
+ { 1750000}, /* 1.75 MHz */
+ { 2500000}, /* 2.5 MHz */
+ { 3500000}, /* 3.5 MHz */
+ { 5000000}, /* 5 MHz */
+ { 5500000}, /* 5.5 MHz */
+ { 6000000}, /* 6 MHz */
+ { 7000000}, /* 7 MHz */
+ { 8000000}, /* 8 MHz */
+ { 9000000}, /* 9 MHz */
+ {10000000}, /* 10 MHz */
+ {12000000}, /* 12 MHz */
+ {14000000}, /* 14 MHz */
+ {15000000}, /* 15 MHz */
+ {20000000}, /* 20 MHz */
+ {24000000}, /* 24 MHz */
+ {28000000}, /* 28 MHz */
+ };
+
+ if (!rx && !tx) {
+ dev_dbg(&intf->dev, "device is sleeping\n");
+ return 0;
+ }
+
+ /* ADC / DAC frequency */
+ if (rx && test_and_clear_bit(RX_ADC_FREQUENCY, &dev->flags)) {
+ dev_dbg(&intf->dev, "RX ADC frequency=%u Hz\n", dev->f_adc);
+ uitmp1 = dev->f_adc;
+ uitmp2 = 1;
+ set_bit(TX_DAC_FREQUENCY, &dev->flags);
+ } else if (tx && test_and_clear_bit(TX_DAC_FREQUENCY, &dev->flags)) {
+ dev_dbg(&intf->dev, "TX DAC frequency=%u Hz\n", dev->f_dac);
+ uitmp1 = dev->f_dac;
+ uitmp2 = 1;
+ set_bit(RX_ADC_FREQUENCY, &dev->flags);
+ } else {
+ uitmp1 = uitmp2 = 0;
+ }
+ if (uitmp1 || uitmp2) {
+ buf[0] = (uitmp1 >> 0) & 0xff;
+ buf[1] = (uitmp1 >> 8) & 0xff;
+ buf[2] = (uitmp1 >> 16) & 0xff;
+ buf[3] = (uitmp1 >> 24) & 0xff;
+ buf[4] = (uitmp2 >> 0) & 0xff;
+ buf[5] = (uitmp2 >> 8) & 0xff;
+ buf[6] = (uitmp2 >> 16) & 0xff;
+ buf[7] = (uitmp2 >> 24) & 0xff;
+ ret = hackrf_ctrl_msg(dev, CMD_SAMPLE_RATE_SET, 0, 0, buf, 8);
+ if (ret)
+ goto err;
+ }
+
+ /* bandwidth */
+ if (rx && test_and_clear_bit(RX_BANDWIDTH, &dev->flags)) {
+ if (dev->rx_bandwidth_auto->val == true)
+ uitmp = dev->f_adc;
+ else
+ uitmp = dev->rx_bandwidth->val;
+
+ for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
+ if (uitmp <= bandwidth_lut[i].freq) {
+ uitmp = bandwidth_lut[i].freq;
+ break;
+ }
+ }
+ dev->rx_bandwidth->val = uitmp;
+ dev->rx_bandwidth->cur.val = uitmp;
+ dev_dbg(&intf->dev, "RX bandwidth selected=%u\n", uitmp);
+ set_bit(TX_BANDWIDTH, &dev->flags);
+ } else if (tx && test_and_clear_bit(TX_BANDWIDTH, &dev->flags)) {
+ if (dev->tx_bandwidth_auto->val == true)
+ uitmp = dev->f_dac;
+ else
+ uitmp = dev->tx_bandwidth->val;
+
+ for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
+ if (uitmp <= bandwidth_lut[i].freq) {
+ uitmp = bandwidth_lut[i].freq;
+ break;
+ }
+ }
+ dev->tx_bandwidth->val = uitmp;
+ dev->tx_bandwidth->cur.val = uitmp;
+ dev_dbg(&intf->dev, "TX bandwidth selected=%u\n", uitmp);
+ set_bit(RX_BANDWIDTH, &dev->flags);
+ } else {
+ uitmp = 0;
+ }
+ if (uitmp) {
+ uitmp1 = uitmp2 = 0;
+ uitmp1 |= ((uitmp >> 0) & 0xff) << 0;
+ uitmp1 |= ((uitmp >> 8) & 0xff) << 8;
+ uitmp2 |= ((uitmp >> 16) & 0xff) << 0;
+ uitmp2 |= ((uitmp >> 24) & 0xff) << 8;
+ ret = hackrf_ctrl_msg(dev, CMD_BASEBAND_FILTER_BANDWIDTH_SET,
+ uitmp1, uitmp2, NULL, 0);
+ if (ret)
+ goto err;
+ }
+
+ /* RX / TX RF frequency */
+ if (rx && test_and_clear_bit(RX_RF_FREQUENCY, &dev->flags)) {
+ dev_dbg(&intf->dev, "RX RF frequency=%u Hz\n", dev->f_rx);
+ uitmp1 = dev->f_rx / 1000000;
+ uitmp2 = dev->f_rx % 1000000;
+ set_bit(TX_RF_FREQUENCY, &dev->flags);
+ } else if (tx && test_and_clear_bit(TX_RF_FREQUENCY, &dev->flags)) {
+ dev_dbg(&intf->dev, "TX RF frequency=%u Hz\n", dev->f_tx);
+ uitmp1 = dev->f_tx / 1000000;
+ uitmp2 = dev->f_tx % 1000000;
+ set_bit(RX_RF_FREQUENCY, &dev->flags);
+ } else {
+ uitmp1 = uitmp2 = 0;
+ }
+ if (uitmp1 || uitmp2) {
+ buf[0] = (uitmp1 >> 0) & 0xff;
+ buf[1] = (uitmp1 >> 8) & 0xff;
+ buf[2] = (uitmp1 >> 16) & 0xff;
+ buf[3] = (uitmp1 >> 24) & 0xff;
+ buf[4] = (uitmp2 >> 0) & 0xff;
+ buf[5] = (uitmp2 >> 8) & 0xff;
+ buf[6] = (uitmp2 >> 16) & 0xff;
+ buf[7] = (uitmp2 >> 24) & 0xff;
+ ret = hackrf_ctrl_msg(dev, CMD_SET_FREQ, 0, 0, buf, 8);
+ if (ret)
+ goto err;
+ }
+
+ /* RX RF gain */
+ if (rx && test_and_clear_bit(RX_RF_GAIN, &dev->flags)) {
+ dev_dbg(&intf->dev, "RX RF gain val=%d->%d\n",
+ dev->rx_rf_gain->cur.val, dev->rx_rf_gain->val);
+
+ u8tmp = (dev->rx_rf_gain->val) ? 1 : 0;
+ ret = hackrf_ctrl_msg(dev, CMD_AMP_ENABLE, u8tmp, 0, NULL, 0);
+ if (ret)
+ goto err;
+ set_bit(TX_RF_GAIN, &dev->flags);
+ }
+
+ /* TX RF gain */
+ if (tx && test_and_clear_bit(TX_RF_GAIN, &dev->flags)) {
+ dev_dbg(&intf->dev, "TX RF gain val=%d->%d\n",
+ dev->tx_rf_gain->cur.val, dev->tx_rf_gain->val);
+
+ u8tmp = (dev->tx_rf_gain->val) ? 1 : 0;
+ ret = hackrf_ctrl_msg(dev, CMD_AMP_ENABLE, u8tmp, 0, NULL, 0);
+ if (ret)
+ goto err;
+ set_bit(RX_RF_GAIN, &dev->flags);
+ }
+
+ /* RX LNA gain */
+ if (rx && test_and_clear_bit(RX_LNA_GAIN, &dev->flags)) {
+ dev_dbg(dev->dev, "RX LNA gain val=%d->%d\n",
+ dev->rx_lna_gain->cur.val, dev->rx_lna_gain->val);
+
+ ret = hackrf_ctrl_msg(dev, CMD_SET_LNA_GAIN, 0,
+ dev->rx_lna_gain->val, &u8tmp, 1);
+ if (ret)
+ goto err;
+ }
+
+ /* RX IF gain */
+ if (rx && test_and_clear_bit(RX_IF_GAIN, &dev->flags)) {
+ dev_dbg(&intf->dev, "IF gain val=%d->%d\n",
+ dev->rx_if_gain->cur.val, dev->rx_if_gain->val);
+
+ ret = hackrf_ctrl_msg(dev, CMD_SET_VGA_GAIN, 0,
+ dev->rx_if_gain->val, &u8tmp, 1);
+ if (ret)
+ goto err;
+ }
+
+ /* TX LNA gain */
+ if (tx && test_and_clear_bit(TX_LNA_GAIN, &dev->flags)) {
+ dev_dbg(&intf->dev, "TX LNA gain val=%d->%d\n",
+ dev->tx_lna_gain->cur.val, dev->tx_lna_gain->val);
+
+ ret = hackrf_ctrl_msg(dev, CMD_SET_TXVGA_GAIN, 0,
+ dev->tx_lna_gain->val, &u8tmp, 1);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
+ return ret;
+}
+
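The bandwidth handling above rounds the requested value up to the next filter the hardware supports; a request above the largest table entry falls through unchanged. A self-contained restatement of that loop, as a hypothetical helper rather than code from the patch:

#include <linux/kernel.h>	/* ARRAY_SIZE(), u32 */

/* e.g. 4000000 -> 5000000; 30000000 -> 30000000 (passed through) */
static u32 example_pick_filter_bandwidth(u32 requested)
{
	static const u32 lut[] = {
		 1750000,  2500000,  3500000,  5000000,
		 5500000,  6000000,  7000000,  8000000,
		 9000000, 10000000, 12000000, 14000000,
		15000000, 20000000, 24000000, 28000000,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lut); i++)
		if (requested <= lut[i])
			return lut[i];

	return requested;
}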
/* Private functions */
-static struct hackrf_frame_buf *hackrf_get_next_fill_buf(struct hackrf_dev *dev)
+static struct hackrf_buffer *hackrf_get_next_buffer(struct hackrf_dev *dev,
+ struct list_head *buffer_list)
{
unsigned long flags;
- struct hackrf_frame_buf *buf = NULL;
+ struct hackrf_buffer *buffer = NULL;
- spin_lock_irqsave(&dev->queued_bufs_lock, flags);
- if (list_empty(&dev->queued_bufs))
+ spin_lock_irqsave(&dev->buffer_list_lock, flags);
+ if (list_empty(buffer_list))
goto leave;
- buf = list_entry(dev->queued_bufs.next, struct hackrf_frame_buf, list);
- list_del(&buf->list);
+ buffer = list_entry(buffer_list->next, struct hackrf_buffer, list);
+ list_del(&buffer->list);
leave:
- spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
- return buf;
+ spin_unlock_irqrestore(&dev->buffer_list_lock, flags);
+ return buffer;
}
-static unsigned int hackrf_convert_stream(struct hackrf_dev *dev,
- void *dst, void *src, unsigned int src_len)
+static void hackrf_copy_stream(struct hackrf_dev *dev, void *dst, void *src,
+ unsigned int src_len)
{
memcpy(dst, src, src_len);
@@ -243,22 +472,21 @@ static unsigned int hackrf_convert_stream(struct hackrf_dev *dev,
/* total number of samples */
dev->sample += src_len / 2;
-
- return src_len;
}
/*
* This gets called for the bulk stream pipe. This is done in interrupt
* time, so it has to be fast, not crash, and not stall. Neat.
*/
-static void hackrf_urb_complete(struct urb *urb)
+static void hackrf_urb_complete_in(struct urb *urb)
{
struct hackrf_dev *dev = urb->context;
- struct hackrf_frame_buf *fbuf;
+ struct usb_interface *intf = dev->intf;
+ struct hackrf_buffer *buffer;
+ unsigned int len;
- dev_dbg_ratelimited(dev->dev, "status=%d length=%d/%d errors=%d\n",
- urb->status, urb->actual_length,
- urb->transfer_buffer_length, urb->error_count);
+ dev_dbg_ratelimited(&intf->dev, "status=%d length=%u/%u\n", urb->status,
+ urb->actual_length, urb->transfer_buffer_length);
switch (urb->status) {
case 0: /* success */
@@ -269,33 +497,74 @@ static void hackrf_urb_complete(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_err_ratelimited(dev->dev, "URB failed %d\n", urb->status);
- break;
+ dev_err_ratelimited(&intf->dev, "URB failed %d\n", urb->status);
+ goto exit_usb_submit_urb;
}
- if (likely(urb->actual_length > 0)) {
- void *ptr;
- unsigned int len;
- /* get free framebuffer */
- fbuf = hackrf_get_next_fill_buf(dev);
- if (unlikely(fbuf == NULL)) {
- dev->vb_full++;
- dev_notice_ratelimited(dev->dev,
- "videobuf is full, %d packets dropped\n",
- dev->vb_full);
- goto skip;
- }
+ /* get buffer to write */
+ buffer = hackrf_get_next_buffer(dev, &dev->rx_buffer_list);
+ if (unlikely(buffer == NULL)) {
+ dev->vb_full++;
+ dev_notice_ratelimited(&intf->dev,
+ "buffer is full - %u packets dropped\n",
+ dev->vb_full);
+ goto exit_usb_submit_urb;
+ }
+
+ len = min_t(unsigned long, vb2_plane_size(&buffer->vb.vb2_buf, 0),
+ urb->actual_length);
+ hackrf_copy_stream(dev, vb2_plane_vaddr(&buffer->vb.vb2_buf, 0),
+ urb->transfer_buffer, len);
+ vb2_set_plane_payload(&buffer->vb.vb2_buf, 0, len);
+ buffer->vb.sequence = dev->sequence++;
+ v4l2_get_timestamp(&buffer->vb.timestamp);
+ vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE);
+exit_usb_submit_urb:
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static void hackrf_urb_complete_out(struct urb *urb)
+{
+ struct hackrf_dev *dev = urb->context;
+ struct usb_interface *intf = dev->intf;
+ struct hackrf_buffer *buffer;
+ unsigned int len;
+
+ dev_dbg_ratelimited(&intf->dev, "status=%d length=%u/%u\n", urb->status,
+ urb->actual_length, urb->transfer_buffer_length);
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ dev_err_ratelimited(&intf->dev, "URB failed %d\n", urb->status);
+ }
- /* fill framebuffer */
- ptr = vb2_plane_vaddr(&fbuf->vb, 0);
- len = hackrf_convert_stream(dev, ptr, urb->transfer_buffer,
- urb->actual_length);
- vb2_set_plane_payload(&fbuf->vb, 0, len);
- v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
- fbuf->vb.v4l2_buf.sequence = dev->sequence++;
- vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ /* get buffer to read */
+ buffer = hackrf_get_next_buffer(dev, &dev->tx_buffer_list);
+ if (unlikely(buffer == NULL)) {
+ dev->vb_empty++;
+ dev_notice_ratelimited(&intf->dev,
+ "buffer is empty - %u packets dropped\n",
+ dev->vb_empty);
+ urb->actual_length = 0;
+ goto exit_usb_submit_urb;
}
-skip:
+
+ len = min_t(unsigned long, urb->transfer_buffer_length,
+ vb2_get_plane_payload(&buffer->vb.vb2_buf, 0));
+ hackrf_copy_stream(dev, urb->transfer_buffer,
+ vb2_plane_vaddr(&buffer->vb.vb2_buf, 0), len);
+ urb->actual_length = len;
+ buffer->vb.sequence = dev->sequence++;
+ v4l2_get_timestamp(&buffer->vb.timestamp);
+ vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE);
+exit_usb_submit_urb:
usb_submit_urb(urb, GFP_ATOMIC);
}
@@ -394,9 +663,19 @@ static int hackrf_free_urbs(struct hackrf_dev *dev)
return 0;
}
-static int hackrf_alloc_urbs(struct hackrf_dev *dev)
+static int hackrf_alloc_urbs(struct hackrf_dev *dev, bool rcv)
{
int i, j;
+ unsigned int pipe;
+ usb_complete_t complete;
+
+ if (rcv) {
+ pipe = usb_rcvbulkpipe(dev->udev, 0x81);
+ complete = &hackrf_urb_complete_in;
+ } else {
+ pipe = usb_sndbulkpipe(dev->udev, 0x02);
+ complete = &hackrf_urb_complete_out;
+ }
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
@@ -410,10 +689,10 @@ static int hackrf_alloc_urbs(struct hackrf_dev *dev)
}
usb_fill_bulk_urb(dev->urb_list[i],
dev->udev,
- usb_rcvbulkpipe(dev->udev, 0x81),
+ pipe,
dev->buf_list[i],
BULK_BUFFER_SIZE,
- hackrf_urb_complete, dev);
+ complete, dev);
dev->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
dev->urb_list[i]->transfer_dma = dev->dma_addr[i];
@@ -423,25 +702,6 @@ static int hackrf_alloc_urbs(struct hackrf_dev *dev)
return 0;
}
-/* Must be called with vb_queue_lock hold */
-static void hackrf_cleanup_queued_bufs(struct hackrf_dev *dev)
-{
- unsigned long flags;
-
- dev_dbg(dev->dev, "\n");
-
- spin_lock_irqsave(&dev->queued_bufs_lock, flags);
- while (!list_empty(&dev->queued_bufs)) {
- struct hackrf_frame_buf *buf;
-
- buf = list_entry(dev->queued_bufs.next,
- struct hackrf_frame_buf, list);
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
-}
-
/* The user yanked out the cable... */
static void hackrf_disconnect(struct usb_interface *intf)
{
@@ -455,7 +715,8 @@ static void hackrf_disconnect(struct usb_interface *intf)
/* No need to keep the urbs around after disconnection */
dev->udev = NULL;
v4l2_device_disconnect(&dev->v4l2_dev);
- video_unregister_device(&dev->vdev);
+ video_unregister_device(&dev->tx_vdev);
+ video_unregister_device(&dev->rx_vdev);
mutex_unlock(&dev->v4l2_lock);
mutex_unlock(&dev->vb_queue_lock);
@@ -463,8 +724,33 @@ static void hackrf_disconnect(struct usb_interface *intf)
}
/* Videobuf2 operations */
+static void hackrf_return_all_buffers(struct vb2_queue *vq,
+ enum vb2_buffer_state state)
+{
+ struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+ struct usb_interface *intf = dev->intf;
+ struct hackrf_buffer *buffer, *node;
+ struct list_head *buffer_list;
+ unsigned long flags;
+
+ dev_dbg(&intf->dev, "\n");
+
+ if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE)
+ buffer_list = &dev->rx_buffer_list;
+ else
+ buffer_list = &dev->tx_buffer_list;
+
+ spin_lock_irqsave(&dev->buffer_list_lock, flags);
+ list_for_each_entry_safe(buffer, node, buffer_list, list) {
+ dev_dbg(&intf->dev, "list_for_each_entry_safe\n");
+ vb2_buffer_done(&buffer->vb.vb2_buf, state);
+ list_del(&buffer->list);
+ }
+ spin_unlock_irqrestore(&dev->buffer_list_lock, flags);
+}
+
static int hackrf_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt, unsigned int *nbuffers,
+ const void *parg, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
struct hackrf_dev *dev = vb2_get_drv_priv(vq);
@@ -483,37 +769,62 @@ static int hackrf_queue_setup(struct vb2_queue *vq,
static void hackrf_buf_queue(struct vb2_buffer *vb)
{
- struct hackrf_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct hackrf_frame_buf *buf =
- container_of(vb, struct hackrf_frame_buf, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+ struct hackrf_buffer *buffer = container_of(vbuf, struct hackrf_buffer, vb);
+ struct list_head *buffer_list;
unsigned long flags;
- spin_lock_irqsave(&dev->queued_bufs_lock, flags);
- list_add_tail(&buf->list, &dev->queued_bufs);
- spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
+ dev_dbg_ratelimited(&dev->intf->dev, "\n");
+
+ if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE)
+ buffer_list = &dev->rx_buffer_list;
+ else
+ buffer_list = &dev->tx_buffer_list;
+
+ spin_lock_irqsave(&dev->buffer_list_lock, flags);
+ list_add_tail(&buffer->list, buffer_list);
+ spin_unlock_irqrestore(&dev->buffer_list_lock, flags);
}
static int hackrf_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+ struct usb_interface *intf = dev->intf;
int ret;
+ unsigned int mode;
- dev_dbg(dev->dev, "\n");
-
- if (!dev->udev)
- return -ENODEV;
+ dev_dbg(&intf->dev, "count=%i\n", count);
mutex_lock(&dev->v4l2_lock);
- dev->sequence = 0;
+ /* Allow only RX or TX, not both same time */
+ if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) {
+ if (test_bit(TX_ON, &dev->flags)) {
+ ret = -EBUSY;
+ goto err_hackrf_return_all_buffers;
+ }
+
+ mode = 1;
+ set_bit(RX_ON, &dev->flags);
+ } else {
+ if (test_bit(RX_ON, &dev->flags)) {
+ ret = -EBUSY;
+ goto err_hackrf_return_all_buffers;
+ }
+
+ mode = 2;
+ set_bit(TX_ON, &dev->flags);
+ }
- set_bit(POWER_ON, &dev->flags);
+ dev->sequence = 0;
ret = hackrf_alloc_stream_bufs(dev);
if (ret)
goto err;
- ret = hackrf_alloc_urbs(dev);
+ ret = hackrf_alloc_urbs(dev, (mode == 1));
if (ret)
goto err;
@@ -521,39 +832,37 @@ static int hackrf_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret)
goto err;
+ ret = hackrf_set_params(dev);
+ if (ret)
+ goto err;
+
/* start hardware streaming */
- ret = hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, 1, 0, NULL, 0);
+ ret = hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, mode, 0, NULL, 0);
if (ret)
goto err;
- goto exit_mutex_unlock;
+ mutex_unlock(&dev->v4l2_lock);
+
+ return 0;
err:
hackrf_kill_urbs(dev);
hackrf_free_urbs(dev);
hackrf_free_stream_bufs(dev);
- clear_bit(POWER_ON, &dev->flags);
-
- /* return all queued buffers to vb2 */
- {
- struct hackrf_frame_buf *buf, *tmp;
-
- list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
- }
- }
-
-exit_mutex_unlock:
+ clear_bit(RX_ON, &dev->flags);
+ clear_bit(TX_ON, &dev->flags);
+err_hackrf_return_all_buffers:
+ hackrf_return_all_buffers(vq, VB2_BUF_STATE_QUEUED);
mutex_unlock(&dev->v4l2_lock);
-
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
static void hackrf_stop_streaming(struct vb2_queue *vq)
{
struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+ struct usb_interface *intf = dev->intf;
- dev_dbg(dev->dev, "\n");
+ dev_dbg(&intf->dev, "\n");
mutex_lock(&dev->v4l2_lock);
@@ -564,9 +873,12 @@ static void hackrf_stop_streaming(struct vb2_queue *vq)
hackrf_free_urbs(dev);
hackrf_free_stream_bufs(dev);
- hackrf_cleanup_queued_bufs(dev);
+ hackrf_return_all_buffers(vq, VB2_BUF_STATE_ERROR);
- clear_bit(POWER_ON, &dev->flags);
+ if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE)
+ clear_bit(RX_ON, &dev->flags);
+ else
+ clear_bit(TX_ON, &dev->flags);
mutex_unlock(&dev->v4l2_lock);
}
@@ -584,29 +896,46 @@ static int hackrf_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
struct hackrf_dev *dev = video_drvdata(file);
+ struct usb_interface *intf = dev->intf;
+ struct video_device *vdev = video_devdata(file);
- dev_dbg(dev->dev, "\n");
+ dev_dbg(&intf->dev, "\n");
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ else
+ cap->device_caps = V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+
+ cap->capabilities = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE |
+ V4L2_CAP_DEVICE_CAPS;
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
- strlcpy(cap->card, dev->vdev.name, sizeof(cap->card));
+ strlcpy(cap->card, dev->rx_vdev.name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
-static int hackrf_s_fmt_sdr_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int hackrf_s_fmt_sdr(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct hackrf_dev *dev = video_drvdata(file);
- struct vb2_queue *q = &dev->vb_queue;
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *q;
int i;
dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ q = &dev->rx_vb2_queue;
+ else
+ q = &dev->tx_vb2_queue;
+
if (vb2_is_busy(q))
return -EBUSY;
@@ -628,8 +957,8 @@ static int hackrf_s_fmt_sdr_cap(struct file *file, void *priv,
return 0;
}
-static int hackrf_g_fmt_sdr_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int hackrf_g_fmt_sdr(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct hackrf_dev *dev = video_drvdata(file);
@@ -643,8 +972,8 @@ static int hackrf_g_fmt_sdr_cap(struct file *file, void *priv,
return 0;
}
-static int hackrf_try_fmt_sdr_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int hackrf_try_fmt_sdr(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct hackrf_dev *dev = video_drvdata(file);
int i;
@@ -666,8 +995,8 @@ static int hackrf_try_fmt_sdr_cap(struct file *file, void *priv,
return 0;
}
-static int hackrf_enum_fmt_sdr_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int hackrf_enum_fmt_sdr(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
struct hackrf_dev *dev = video_drvdata(file);
@@ -676,7 +1005,6 @@ static int hackrf_enum_fmt_sdr_cap(struct file *file, void *priv,
if (f->index >= NUM_FORMATS)
return -EINVAL;
- strlcpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].pixelformat;
return 0;
@@ -709,17 +1037,56 @@ static int hackrf_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
if (v->index == 0) {
strlcpy(v->name, "HackRF ADC", sizeof(v->name));
- v->type = V4L2_TUNER_ADC;
+ v->type = V4L2_TUNER_SDR;
v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
- v->rangelow = bands_adc[0].rangelow;
- v->rangehigh = bands_adc[0].rangehigh;
+ v->rangelow = bands_adc_dac[0].rangelow;
+ v->rangehigh = bands_adc_dac[0].rangehigh;
ret = 0;
} else if (v->index == 1) {
strlcpy(v->name, "HackRF RF", sizeof(v->name));
v->type = V4L2_TUNER_RF;
v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
- v->rangelow = bands_rf[0].rangelow;
- v->rangehigh = bands_rf[0].rangehigh;
+ v->rangelow = bands_rx_tx[0].rangelow;
+ v->rangehigh = bands_rx_tx[0].rangehigh;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int hackrf_s_modulator(struct file *file, void *fh,
+ const struct v4l2_modulator *a)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+
+ dev_dbg(dev->dev, "index=%d\n", a->index);
+
+ return a->index > 1 ? -EINVAL : 0;
+}
+
+static int hackrf_g_modulator(struct file *file, void *fh,
+ struct v4l2_modulator *a)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(dev->dev, "index=%d\n", a->index);
+
+ if (a->index == 0) {
+ strlcpy(a->name, "HackRF DAC", sizeof(a->name));
+ a->type = V4L2_TUNER_SDR;
+ a->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ a->rangelow = bands_adc_dac[0].rangelow;
+ a->rangehigh = bands_adc_dac[0].rangehigh;
+ ret = 0;
+ } else if (a->index == 1) {
+ strlcpy(a->name, "HackRF RF", sizeof(a->name));
+ a->type = V4L2_TUNER_RF;
+ a->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ a->rangelow = bands_rx_tx[0].rangelow;
+ a->rangehigh = bands_rx_tx[0].rangehigh;
ret = 0;
} else {
ret = -EINVAL;
@@ -732,47 +1099,46 @@ static int hackrf_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct hackrf_dev *dev = video_drvdata(file);
+ struct usb_interface *intf = dev->intf;
+ struct video_device *vdev = video_devdata(file);
int ret;
- unsigned int upper, lower;
- u8 buf[8];
+ unsigned int uitmp;
- dev_dbg(dev->dev, "tuner=%d type=%d frequency=%u\n",
+ dev_dbg(&intf->dev, "tuner=%d type=%d frequency=%u\n",
f->tuner, f->type, f->frequency);
if (f->tuner == 0) {
- dev->f_adc = clamp_t(unsigned int, f->frequency,
- bands_adc[0].rangelow, bands_adc[0].rangehigh);
- dev_dbg(dev->dev, "ADC frequency=%u Hz\n", dev->f_adc);
- upper = dev->f_adc;
- lower = 1;
- buf[0] = (upper >> 0) & 0xff;
- buf[1] = (upper >> 8) & 0xff;
- buf[2] = (upper >> 16) & 0xff;
- buf[3] = (upper >> 24) & 0xff;
- buf[4] = (lower >> 0) & 0xff;
- buf[5] = (lower >> 8) & 0xff;
- buf[6] = (lower >> 16) & 0xff;
- buf[7] = (lower >> 24) & 0xff;
- ret = hackrf_ctrl_msg(dev, CMD_SAMPLE_RATE_SET, 0, 0, buf, 8);
+ uitmp = clamp(f->frequency, bands_adc_dac[0].rangelow,
+ bands_adc_dac[0].rangehigh);
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ dev->f_adc = uitmp;
+ set_bit(RX_ADC_FREQUENCY, &dev->flags);
+ } else {
+ dev->f_dac = uitmp;
+ set_bit(TX_DAC_FREQUENCY, &dev->flags);
+ }
} else if (f->tuner == 1) {
- dev->f_rf = clamp_t(unsigned int, f->frequency,
- bands_rf[0].rangelow, bands_rf[0].rangehigh);
- dev_dbg(dev->dev, "RF frequency=%u Hz\n", dev->f_rf);
- upper = dev->f_rf / 1000000;
- lower = dev->f_rf % 1000000;
- buf[0] = (upper >> 0) & 0xff;
- buf[1] = (upper >> 8) & 0xff;
- buf[2] = (upper >> 16) & 0xff;
- buf[3] = (upper >> 24) & 0xff;
- buf[4] = (lower >> 0) & 0xff;
- buf[5] = (lower >> 8) & 0xff;
- buf[6] = (lower >> 16) & 0xff;
- buf[7] = (lower >> 24) & 0xff;
- ret = hackrf_ctrl_msg(dev, CMD_SET_FREQ, 0, 0, buf, 8);
+ uitmp = clamp(f->frequency, bands_rx_tx[0].rangelow,
+ bands_rx_tx[0].rangehigh);
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ dev->f_rx = uitmp;
+ set_bit(RX_RF_FREQUENCY, &dev->flags);
+ } else {
+ dev->f_tx = uitmp;
+ set_bit(TX_RF_FREQUENCY, &dev->flags);
+ }
} else {
ret = -EINVAL;
+ goto err;
}
+ ret = hackrf_set_params(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -780,22 +1146,32 @@ static int hackrf_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct hackrf_dev *dev = video_drvdata(file);
+ struct usb_interface *intf = dev->intf;
+ struct video_device *vdev = video_devdata(file);
int ret;
dev_dbg(dev->dev, "tuner=%d type=%d\n", f->tuner, f->type);
if (f->tuner == 0) {
- f->type = V4L2_TUNER_ADC;
- f->frequency = dev->f_adc;
- ret = 0;
+ f->type = V4L2_TUNER_SDR;
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ f->frequency = dev->f_adc;
+ else
+ f->frequency = dev->f_dac;
} else if (f->tuner == 1) {
f->type = V4L2_TUNER_RF;
- f->frequency = dev->f_rf;
- ret = 0;
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ f->frequency = dev->f_rx;
+ else
+ f->frequency = dev->f_tx;
} else {
ret = -EINVAL;
+ goto err;
}
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -809,17 +1185,17 @@ static int hackrf_enum_freq_bands(struct file *file, void *priv,
band->tuner, band->type, band->index);
if (band->tuner == 0) {
- if (band->index >= ARRAY_SIZE(bands_adc)) {
+ if (band->index >= ARRAY_SIZE(bands_adc_dac)) {
ret = -EINVAL;
} else {
- *band = bands_adc[band->index];
+ *band = bands_adc_dac[band->index];
ret = 0;
}
} else if (band->tuner == 1) {
- if (band->index >= ARRAY_SIZE(bands_rf)) {
+ if (band->index >= ARRAY_SIZE(bands_rx_tx)) {
ret = -EINVAL;
} else {
- *band = bands_rf[band->index];
+ *band = bands_rx_tx[band->index];
ret = 0;
}
} else {
@@ -832,10 +1208,15 @@ static int hackrf_enum_freq_bands(struct file *file, void *priv,
static const struct v4l2_ioctl_ops hackrf_ioctl_ops = {
.vidioc_querycap = hackrf_querycap,
- .vidioc_s_fmt_sdr_cap = hackrf_s_fmt_sdr_cap,
- .vidioc_g_fmt_sdr_cap = hackrf_g_fmt_sdr_cap,
- .vidioc_enum_fmt_sdr_cap = hackrf_enum_fmt_sdr_cap,
- .vidioc_try_fmt_sdr_cap = hackrf_try_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = hackrf_s_fmt_sdr,
+ .vidioc_g_fmt_sdr_cap = hackrf_g_fmt_sdr,
+ .vidioc_enum_fmt_sdr_cap = hackrf_enum_fmt_sdr,
+ .vidioc_try_fmt_sdr_cap = hackrf_try_fmt_sdr,
+
+ .vidioc_s_fmt_sdr_out = hackrf_s_fmt_sdr,
+ .vidioc_g_fmt_sdr_out = hackrf_g_fmt_sdr,
+ .vidioc_enum_fmt_sdr_out = hackrf_enum_fmt_sdr,
+ .vidioc_try_fmt_sdr_out = hackrf_try_fmt_sdr,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -843,6 +1224,7 @@ static const struct v4l2_ioctl_ops hackrf_ioctl_ops = {
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
@@ -850,6 +1232,9 @@ static const struct v4l2_ioctl_ops hackrf_ioctl_ops = {
.vidioc_s_tuner = hackrf_s_tuner,
.vidioc_g_tuner = hackrf_g_tuner,
+ .vidioc_s_modulator = hackrf_s_modulator,
+ .vidioc_g_modulator = hackrf_g_modulator,
+
.vidioc_s_frequency = hackrf_s_frequency,
.vidioc_g_frequency = hackrf_g_frequency,
.vidioc_enum_freq_bands = hackrf_enum_freq_bands,
@@ -864,6 +1249,7 @@ static const struct v4l2_file_operations hackrf_fops = {
.open = v4l2_fh_open,
.release = vb2_fop_release,
.read = vb2_fop_read,
+ .write = vb2_fop_write,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
@@ -880,135 +1266,93 @@ static void hackrf_video_release(struct v4l2_device *v)
{
struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev);
- v4l2_ctrl_handler_free(&dev->hdl);
+ dev_dbg(dev->dev, "\n");
+
+ v4l2_ctrl_handler_free(&dev->rx_ctrl_handler);
+ v4l2_ctrl_handler_free(&dev->tx_ctrl_handler);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
}
-static int hackrf_set_bandwidth(struct hackrf_dev *dev)
-{
- int ret, i;
- u16 u16tmp, u16tmp2;
- unsigned int bandwidth;
-
- static const struct {
- u32 freq;
- } bandwidth_lut[] = {
- { 1750000}, /* 1.75 MHz */
- { 2500000}, /* 2.5 MHz */
- { 3500000}, /* 3.5 MHz */
- { 5000000}, /* 5 MHz */
- { 5500000}, /* 5.5 MHz */
- { 6000000}, /* 6 MHz */
- { 7000000}, /* 7 MHz */
- { 8000000}, /* 8 MHz */
- { 9000000}, /* 9 MHz */
- {10000000}, /* 10 MHz */
- {12000000}, /* 12 MHz */
- {14000000}, /* 14 MHz */
- {15000000}, /* 15 MHz */
- {20000000}, /* 20 MHz */
- {24000000}, /* 24 MHz */
- {28000000}, /* 28 MHz */
- };
-
- dev_dbg(dev->dev, "bandwidth auto=%d->%d val=%d->%d f_adc=%u\n",
- dev->bandwidth_auto->cur.val,
- dev->bandwidth_auto->val, dev->bandwidth->cur.val,
- dev->bandwidth->val, dev->f_adc);
-
- if (dev->bandwidth_auto->val == true)
- bandwidth = dev->f_adc;
- else
- bandwidth = dev->bandwidth->val;
-
- for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
- if (bandwidth <= bandwidth_lut[i].freq) {
- bandwidth = bandwidth_lut[i].freq;
- break;
- }
- }
-
- dev->bandwidth->val = bandwidth;
- dev->bandwidth->cur.val = bandwidth;
-
- dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth);
-
- u16tmp = 0;
- u16tmp |= ((bandwidth >> 0) & 0xff) << 0;
- u16tmp |= ((bandwidth >> 8) & 0xff) << 8;
- u16tmp2 = 0;
- u16tmp2 |= ((bandwidth >> 16) & 0xff) << 0;
- u16tmp2 |= ((bandwidth >> 24) & 0xff) << 8;
-
- ret = hackrf_ctrl_msg(dev, CMD_BASEBAND_FILTER_BANDWIDTH_SET,
- u16tmp, u16tmp2, NULL, 0);
- if (ret)
- dev_dbg(dev->dev, "failed=%d\n", ret);
-
- return ret;
-}
-
-static int hackrf_set_lna_gain(struct hackrf_dev *dev)
-{
- int ret;
- u8 u8tmp;
-
- dev_dbg(dev->dev, "lna val=%d->%d\n",
- dev->lna_gain->cur.val, dev->lna_gain->val);
-
- ret = hackrf_ctrl_msg(dev, CMD_SET_LNA_GAIN, 0, dev->lna_gain->val,
- &u8tmp, 1);
- if (ret)
- dev_dbg(dev->dev, "failed=%d\n", ret);
-
- return ret;
-}
-
-static int hackrf_set_if_gain(struct hackrf_dev *dev)
+static int hackrf_s_ctrl_rx(struct v4l2_ctrl *ctrl)
{
+ struct hackrf_dev *dev = container_of(ctrl->handler,
+ struct hackrf_dev, rx_ctrl_handler);
+ struct usb_interface *intf = dev->intf;
int ret;
- u8 u8tmp;
- dev_dbg(dev->dev, "val=%d->%d\n",
- dev->if_gain->cur.val, dev->if_gain->val);
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ set_bit(RX_BANDWIDTH, &dev->flags);
+ break;
+ case V4L2_CID_RF_TUNER_RF_GAIN:
+ set_bit(RX_RF_GAIN, &dev->flags);
+ break;
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ set_bit(RX_LNA_GAIN, &dev->flags);
+ break;
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ set_bit(RX_IF_GAIN, &dev->flags);
+ break;
+ default:
+ dev_dbg(&intf->dev, "unknown ctrl: id=%d name=%s\n",
+ ctrl->id, ctrl->name);
+ ret = -EINVAL;
+ goto err;
+ }
- ret = hackrf_ctrl_msg(dev, CMD_SET_VGA_GAIN, 0, dev->if_gain->val,
- &u8tmp, 1);
+ ret = hackrf_set_params(dev);
if (ret)
- dev_dbg(dev->dev, "failed=%d\n", ret);
+ goto err;
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
-static int hackrf_s_ctrl(struct v4l2_ctrl *ctrl)
+static int hackrf_s_ctrl_tx(struct v4l2_ctrl *ctrl)
{
struct hackrf_dev *dev = container_of(ctrl->handler,
- struct hackrf_dev, hdl);
+ struct hackrf_dev, tx_ctrl_handler);
+ struct usb_interface *intf = dev->intf;
int ret;
switch (ctrl->id) {
case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
case V4L2_CID_RF_TUNER_BANDWIDTH:
- ret = hackrf_set_bandwidth(dev);
+ set_bit(TX_BANDWIDTH, &dev->flags);
break;
case V4L2_CID_RF_TUNER_LNA_GAIN:
- ret = hackrf_set_lna_gain(dev);
+ set_bit(TX_LNA_GAIN, &dev->flags);
break;
- case V4L2_CID_RF_TUNER_IF_GAIN:
- ret = hackrf_set_if_gain(dev);
+ case V4L2_CID_RF_TUNER_RF_GAIN:
+ set_bit(TX_RF_GAIN, &dev->flags);
break;
default:
- dev_dbg(dev->dev, "unknown ctrl: id=%d name=%s\n",
- ctrl->id, ctrl->name);
+ dev_dbg(&intf->dev, "unknown ctrl: id=%d name=%s\n",
+ ctrl->id, ctrl->name);
ret = -EINVAL;
+ goto err;
}
+ ret = hackrf_set_params(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
-static const struct v4l2_ctrl_ops hackrf_ctrl_ops = {
- .s_ctrl = hackrf_s_ctrl,
+static const struct v4l2_ctrl_ops hackrf_ctrl_ops_rx = {
+ .s_ctrl = hackrf_s_ctrl_rx,
+};
+
+static const struct v4l2_ctrl_ops hackrf_ctrl_ops_tx = {
+ .s_ctrl = hackrf_s_ctrl_tx,
};
static int hackrf_probe(struct usb_interface *intf,
@@ -1019,19 +1363,29 @@ static int hackrf_probe(struct usb_interface *intf,
u8 u8tmp, buf[BUF_SIZE];
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
mutex_init(&dev->v4l2_lock);
mutex_init(&dev->vb_queue_lock);
- spin_lock_init(&dev->queued_bufs_lock);
- INIT_LIST_HEAD(&dev->queued_bufs);
+ spin_lock_init(&dev->buffer_list_lock);
+ INIT_LIST_HEAD(&dev->rx_buffer_list);
+ INIT_LIST_HEAD(&dev->tx_buffer_list);
+ dev->intf = intf;
dev->dev = &intf->dev;
dev->udev = interface_to_usbdev(intf);
- dev->f_adc = bands_adc[0].rangelow;
- dev->f_rf = bands_rf[0].rangelow;
dev->pixelformat = formats[0].pixelformat;
dev->buffersize = formats[0].buffersize;
+ dev->f_adc = bands_adc_dac[0].rangelow;
+ dev->f_dac = bands_adc_dac[0].rangelow;
+ dev->f_rx = bands_rx_tx[0].rangelow;
+ dev->f_tx = bands_rx_tx[0].rangelow;
+ set_bit(RX_ADC_FREQUENCY, &dev->flags);
+ set_bit(TX_DAC_FREQUENCY, &dev->flags);
+ set_bit(RX_RF_FREQUENCY, &dev->flags);
+ set_bit(TX_RF_FREQUENCY, &dev->flags);
/* Detect device */
ret = hackrf_ctrl_msg(dev, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1);
@@ -1040,83 +1394,143 @@ static int hackrf_probe(struct usb_interface *intf,
buf, BUF_SIZE);
if (ret) {
dev_err(dev->dev, "Could not detect board\n");
- goto err_free_mem;
+ goto err_kfree;
}
buf[BUF_SIZE - 1] = '\0';
-
dev_info(dev->dev, "Board ID: %02x\n", u8tmp);
dev_info(dev->dev, "Firmware version: %s\n", buf);
- /* Init videobuf2 queue structure */
- dev->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
- dev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
- dev->vb_queue.drv_priv = dev;
- dev->vb_queue.buf_struct_size = sizeof(struct hackrf_frame_buf);
- dev->vb_queue.ops = &hackrf_vb2_ops;
- dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
- dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- ret = vb2_queue_init(&dev->vb_queue);
+ /* Init vb2 queue structure for receiver */
+ dev->rx_vb2_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ dev->rx_vb2_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF |
+ VB2_READ;
+ dev->rx_vb2_queue.ops = &hackrf_vb2_ops;
+ dev->rx_vb2_queue.mem_ops = &vb2_vmalloc_memops;
+ dev->rx_vb2_queue.drv_priv = dev;
+ dev->rx_vb2_queue.buf_struct_size = sizeof(struct hackrf_buffer);
+ dev->rx_vb2_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&dev->rx_vb2_queue);
if (ret) {
- dev_err(dev->dev, "Could not initialize vb2 queue\n");
- goto err_free_mem;
+ dev_err(dev->dev, "Could not initialize rx vb2 queue\n");
+ goto err_kfree;
}
- /* Init video_device structure */
- dev->vdev = hackrf_template;
- dev->vdev.queue = &dev->vb_queue;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
- video_set_drvdata(&dev->vdev, dev);
+ /* Init vb2 queue structure for transmitter */
+ dev->tx_vb2_queue.type = V4L2_BUF_TYPE_SDR_OUTPUT;
+ dev->tx_vb2_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF |
+ VB2_WRITE;
+ dev->tx_vb2_queue.ops = &hackrf_vb2_ops;
+ dev->tx_vb2_queue.mem_ops = &vb2_vmalloc_memops;
+ dev->tx_vb2_queue.drv_priv = dev;
+ dev->tx_vb2_queue.buf_struct_size = sizeof(struct hackrf_buffer);
+ dev->tx_vb2_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&dev->tx_vb2_queue);
+ if (ret) {
+ dev_err(dev->dev, "Could not initialize tx vb2 queue\n");
+ goto err_kfree;
+ }
+
+ /* Register controls for receiver */
+ v4l2_ctrl_handler_init(&dev->rx_ctrl_handler, 5);
+ dev->rx_bandwidth_auto = v4l2_ctrl_new_std(&dev->rx_ctrl_handler,
+ &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
+ 0, 1, 0, 1);
+ dev->rx_bandwidth = v4l2_ctrl_new_std(&dev->rx_ctrl_handler,
+ &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_BANDWIDTH,
+ 1750000, 28000000, 50000, 1750000);
+ v4l2_ctrl_auto_cluster(2, &dev->rx_bandwidth_auto, 0, false);
+ dev->rx_rf_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler,
+ &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_RF_GAIN, 0, 12, 12, 0);
+ dev->rx_lna_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler,
+ &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 40, 8, 0);
+ dev->rx_if_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler,
+ &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_IF_GAIN, 0, 62, 2, 0);
+ if (dev->rx_ctrl_handler.error) {
+ ret = dev->rx_ctrl_handler.error;
+ dev_err(dev->dev, "Could not initialize controls\n");
+ goto err_v4l2_ctrl_handler_free_rx;
+ }
+ v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler);
+
+ /* Register controls for transmitter */
+ v4l2_ctrl_handler_init(&dev->tx_ctrl_handler, 4);
+ dev->tx_bandwidth_auto = v4l2_ctrl_new_std(&dev->tx_ctrl_handler,
+ &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
+ 0, 1, 0, 1);
+ dev->tx_bandwidth = v4l2_ctrl_new_std(&dev->tx_ctrl_handler,
+ &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_BANDWIDTH,
+ 1750000, 28000000, 50000, 1750000);
+ v4l2_ctrl_auto_cluster(2, &dev->tx_bandwidth_auto, 0, false);
+ dev->tx_lna_gain = v4l2_ctrl_new_std(&dev->tx_ctrl_handler,
+ &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 47, 1, 0);
+ dev->tx_rf_gain = v4l2_ctrl_new_std(&dev->tx_ctrl_handler,
+ &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_RF_GAIN, 0, 15, 15, 0);
+ if (dev->tx_ctrl_handler.error) {
+ ret = dev->tx_ctrl_handler.error;
+ dev_err(dev->dev, "Could not initialize controls\n");
+ goto err_v4l2_ctrl_handler_free_tx;
+ }
+ v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler);
/* Register the v4l2_device structure */
dev->v4l2_dev.release = hackrf_video_release;
ret = v4l2_device_register(&intf->dev, &dev->v4l2_dev);
if (ret) {
dev_err(dev->dev, "Failed to register v4l2-device (%d)\n", ret);
- goto err_free_mem;
+ goto err_v4l2_ctrl_handler_free_tx;
}
- /* Register controls */
- v4l2_ctrl_handler_init(&dev->hdl, 4);
- dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
- V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
- dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
- V4L2_CID_RF_TUNER_BANDWIDTH,
- 1750000, 28000000, 50000, 1750000);
- v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
- dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
- V4L2_CID_RF_TUNER_LNA_GAIN, 0, 40, 8, 0);
- dev->if_gain = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
- V4L2_CID_RF_TUNER_IF_GAIN, 0, 62, 2, 0);
- if (dev->hdl.error) {
- ret = dev->hdl.error;
- dev_err(dev->dev, "Could not initialize controls\n");
- goto err_free_controls;
+ /* Init video_device structure for receiver */
+ dev->rx_vdev = hackrf_template;
+ dev->rx_vdev.queue = &dev->rx_vb2_queue;
+ dev->rx_vdev.queue->lock = &dev->vb_queue_lock;
+ dev->rx_vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->rx_vdev.ctrl_handler = &dev->rx_ctrl_handler;
+ dev->rx_vdev.lock = &dev->v4l2_lock;
+ dev->rx_vdev.vfl_dir = VFL_DIR_RX;
+ video_set_drvdata(&dev->rx_vdev, dev);
+ ret = video_register_device(&dev->rx_vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
+ dev_err(dev->dev,
+ "Failed to register as video device (%d)\n", ret);
+ goto err_v4l2_device_unregister;
}
-
- v4l2_ctrl_handler_setup(&dev->hdl);
-
- dev->v4l2_dev.ctrl_handler = &dev->hdl;
- dev->vdev.v4l2_dev = &dev->v4l2_dev;
- dev->vdev.lock = &dev->v4l2_lock;
-
- ret = video_register_device(&dev->vdev, VFL_TYPE_SDR, -1);
+ dev_info(dev->dev, "Registered as %s\n",
+ video_device_node_name(&dev->rx_vdev));
+
+ /* Init video_device structure for transmitter */
+ dev->tx_vdev = hackrf_template;
+ dev->tx_vdev.queue = &dev->tx_vb2_queue;
+ dev->tx_vdev.queue->lock = &dev->vb_queue_lock;
+ dev->tx_vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->tx_vdev.ctrl_handler = &dev->tx_ctrl_handler;
+ dev->tx_vdev.lock = &dev->v4l2_lock;
+ dev->tx_vdev.vfl_dir = VFL_DIR_TX;
+ video_set_drvdata(&dev->tx_vdev, dev);
+ ret = video_register_device(&dev->tx_vdev, VFL_TYPE_SDR, -1);
if (ret) {
- dev_err(dev->dev, "Failed to register as video device (%d)\n",
- ret);
- goto err_unregister_v4l2_dev;
+ dev_err(dev->dev,
+ "Failed to register as video device (%d)\n", ret);
+ goto err_video_unregister_device_rx;
}
dev_info(dev->dev, "Registered as %s\n",
- video_device_node_name(&dev->vdev));
+ video_device_node_name(&dev->tx_vdev));
+
dev_notice(dev->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
return 0;
-
-err_free_controls:
- v4l2_ctrl_handler_free(&dev->hdl);
-err_unregister_v4l2_dev:
+err_video_unregister_device_rx:
+ video_unregister_device(&dev->rx_vdev);
+err_v4l2_device_unregister:
v4l2_device_unregister(&dev->v4l2_dev);
-err_free_mem:
+err_v4l2_ctrl_handler_free_tx:
+ v4l2_ctrl_handler_free(&dev->tx_ctrl_handler);
+err_v4l2_ctrl_handler_free_rx:
+ v4l2_ctrl_handler_free(&dev->rx_ctrl_handler);
+err_kfree:
kfree(dev);
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 3f276d921cca..e06a21a4fbd9 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -28,6 +28,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <linux/usb.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <linux/spi/spi.h>
@@ -112,7 +113,8 @@ static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
/* intermediate buffers with raw data from the USB device */
struct msi2500_frame_buf {
- struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -431,10 +433,10 @@ static void msi2500_isoc_handler(struct urb *urb)
}
/* fill framebuffer */
- ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
flen = msi2500_convert_stream(dev, ptr, iso_buf, flen);
- vb2_set_plane_payload(&fbuf->vb, 0, flen);
- vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, flen);
+ vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
handler_end:
@@ -569,7 +571,7 @@ static void msi2500_cleanup_queued_bufs(struct msi2500_dev *dev)
buf = list_entry(dev->queued_bufs.next,
struct msi2500_frame_buf, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}
@@ -614,7 +616,7 @@ static int msi2500_querycap(struct file *file, void *fh,
/* Videobuf2 operations */
static int msi2500_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[],
void *alloc_ctxs[])
@@ -633,15 +635,16 @@ static int msi2500_queue_setup(struct vb2_queue *vq,
static void msi2500_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct msi2500_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct msi2500_frame_buf *buf = container_of(vb,
+ struct msi2500_frame_buf *buf = container_of(vbuf,
struct msi2500_frame_buf,
vb);
unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (unlikely(!dev->udev)) {
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
return;
}
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 702267e208ba..b79c36fd8cd2 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -240,9 +240,9 @@ static void pwc_frame_complete(struct pwc_device *pdev)
PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
" discarded.\n", fbuf->filled);
} else {
- fbuf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
- fbuf->vb.v4l2_buf.sequence = pdev->vframe_count;
- vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ fbuf->vb.field = V4L2_FIELD_NONE;
+ fbuf->vb.sequence = pdev->vframe_count;
+ vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
pdev->fill_buf = NULL;
pdev->vsync = 0;
}
@@ -287,7 +287,7 @@ static void pwc_isoc_handler(struct urb *urb)
{
PWC_ERROR("Too many ISOC errors, bailing out.\n");
if (pdev->fill_buf) {
- vb2_buffer_done(&pdev->fill_buf->vb,
+ vb2_buffer_done(&pdev->fill_buf->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
pdev->fill_buf = NULL;
}
@@ -317,7 +317,7 @@ static void pwc_isoc_handler(struct urb *urb)
if (pdev->vsync == 1) {
v4l2_get_timestamp(
- &fbuf->vb.v4l2_buf.timestamp);
+ &fbuf->vb.timestamp);
pdev->vsync = 2;
}
@@ -520,7 +520,7 @@ static void pwc_cleanup_queued_bufs(struct pwc_device *pdev,
buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, state);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
}
spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
@@ -571,7 +571,7 @@ static void pwc_video_release(struct v4l2_device *v)
/***************************************************************************/
/* Videobuf2 operations */
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -594,7 +594,9 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int buffer_init(struct vb2_buffer *vb)
{
- struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct pwc_frame_buf *buf =
+ container_of(vbuf, struct pwc_frame_buf, vb);
/* need vmalloc since frame buffer > 128K */
buf->data = vzalloc(PWC_FRAME_SIZE);
@@ -618,7 +620,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
- struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct pwc_frame_buf *buf =
+ container_of(vbuf, struct pwc_frame_buf, vb);
if (vb->state == VB2_BUF_STATE_DONE) {
/*
@@ -633,7 +637,9 @@ static void buffer_finish(struct vb2_buffer *vb)
static void buffer_cleanup(struct vb2_buffer *vb)
{
- struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct pwc_frame_buf *buf =
+ container_of(vbuf, struct pwc_frame_buf, vb);
vfree(buf->data);
}
@@ -641,12 +647,14 @@ static void buffer_cleanup(struct vb2_buffer *vb)
static void buffer_queue(struct vb2_buffer *vb)
{
struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
- struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct pwc_frame_buf *buf =
+ container_of(vbuf, struct pwc_frame_buf, vb);
unsigned long flags = 0;
/* Check the device has not disconnected between prep and queuing */
if (!pdev->udev) {
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
return;
}
@@ -695,7 +703,8 @@ static void stop_streaming(struct vb2_queue *vq)
pwc_cleanup_queued_bufs(pdev, VB2_BUF_STATE_ERROR);
if (pdev->fill_buf)
- vb2_buffer_done(&pdev->fill_buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&pdev->fill_buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
mutex_unlock(&pdev->v4l2_lock);
}
diff --git a/drivers/media/usb/pwc/pwc-uncompress.c b/drivers/media/usb/pwc/pwc-uncompress.c
index b65903fbcf0d..98c46f93f119 100644
--- a/drivers/media/usb/pwc/pwc-uncompress.c
+++ b/drivers/media/usb/pwc/pwc-uncompress.c
@@ -40,7 +40,7 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
u16 *src;
u16 *dsty, *dstu, *dstv;
- image = vb2_plane_vaddr(&fbuf->vb, 0);
+ image = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
@@ -55,12 +55,12 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
* determine this using the type of the webcam */
memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
memcpy(raw_frame+1, yuv, pdev->frame_size);
- vb2_set_plane_payload(&fbuf->vb, 0,
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
pdev->frame_size + sizeof(struct pwc_raw_frame));
return 0;
}
- vb2_set_plane_payload(&fbuf->vb, 0,
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
pdev->width * pdev->height * 3 / 2);
if (pdev->vbandlength == 0) {
diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h
index 81b017a554bc..3c73bdaae450 100644
--- a/drivers/media/usb/pwc/pwc.h
+++ b/drivers/media/usb/pwc/pwc.h
@@ -40,6 +40,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include <linux/input.h>
@@ -210,7 +211,8 @@ struct pwc_raw_frame {
/* intermediate buffers with raw data from the USB cam */
struct pwc_frame_buf
{
- struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
struct list_head list;
void *data;
int filled; /* number of bytes filled */
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 0f3c34d47ec3..e7acb12ad21d 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -45,6 +45,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -293,7 +294,7 @@ struct s2255_fmt {
/* buffer for one video frame */
struct s2255_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -573,14 +574,14 @@ static void s2255_got_frame(struct s2255_vc *vc, int jpgsize)
buf = list_entry(vc->buf_list.next,
struct s2255_buffer, list);
list_del(&buf->list);
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- buf->vb.v4l2_buf.field = vc->field;
- buf->vb.v4l2_buf.sequence = vc->frame_count;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ buf->vb.field = vc->field;
+ buf->vb.sequence = vc->frame_count;
spin_unlock_irqrestore(&vc->qlock, flags);
s2255_fillbuff(vc, buf, jpgsize);
/* tell v4l buffer was filled */
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
}
@@ -612,7 +613,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
{
int pos = 0;
const char *tmpbuf;
- char *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ char *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
unsigned long last_frame;
struct s2255_dev *dev = vc->dev;
@@ -635,7 +636,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
- vb2_set_plane_payload(&buf->vb, 0, jpgsize);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, jpgsize);
memcpy(vbuf, tmpbuf, jpgsize);
break;
case V4L2_PIX_FMT_YUV422P:
@@ -659,7 +660,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
Videobuf operations
------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -674,7 +675,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int buffer_prepare(struct vb2_buffer *vb)
{
struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
- struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
int w = vc->width;
int h = vc->height;
unsigned long size;
@@ -696,13 +698,14 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
return 0;
}
static void buffer_queue(struct vb2_buffer *vb)
{
- struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
unsigned long flags = 0;
dprintk(vc->dev, 1, "%s\n", __func__);
@@ -1116,9 +1119,9 @@ static void stop_streaming(struct vb2_queue *vq)
spin_lock_irqsave(&vc->qlock, flags);
list_for_each_entry_safe(buf, node, &vc->buf_list, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(vc->dev, 2, "[%p/%d] done\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
spin_unlock_irqrestore(&vc->qlock, flags);
}
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index e12b10352871..0bd34f1e7fa9 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -664,7 +664,7 @@ static const struct v4l2_ioctl_ops stk1160_ioctl_ops = {
/*
* Videobuf2 operations
*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *v4l_fmt,
+static int queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -695,8 +695,9 @@ static void buffer_queue(struct vb2_buffer *vb)
{
unsigned long flags;
struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct stk1160_buffer *buf =
- container_of(vb, struct stk1160_buffer, vb);
+ container_of(vbuf, struct stk1160_buffer, vb);
spin_lock_irqsave(&dev->buf_lock, flags);
if (!dev->udev) {
@@ -704,7 +705,7 @@ static void buffer_queue(struct vb2_buffer *vb)
* If the device is disconnected return the buffer to userspace
* directly. The next QBUF call will fail with -ENODEV.
*/
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
} else {
buf->mem = vb2_plane_vaddr(vb, 0);
@@ -717,7 +718,7 @@ static void buffer_queue(struct vb2_buffer *vb)
* the buffer to userspace directly.
*/
if (buf->length < dev->width * dev->height * 2)
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
else
list_add_tail(&buf->list, &dev->avail_bufs);
@@ -769,9 +770,9 @@ void stk1160_clear_queue(struct stk1160 *dev)
buf = list_first_entry(&dev->avail_bufs,
struct stk1160_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
stk1160_dbg("buffer [%p/%d] aborted\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
/* It's important to release the current buffer */
@@ -779,9 +780,9 @@ void stk1160_clear_queue(struct stk1160 *dev)
buf = dev->isoc_ctl.buf;
dev->isoc_ctl.buf = NULL;
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
stk1160_dbg("buffer [%p/%d] aborted\n",
- buf, buf->vb.v4l2_buf.index);
+ buf, buf->vb.vb2_buf.index);
}
spin_unlock_irqrestore(&dev->buf_lock, flags);
}
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 940c3eaea507..75654e676e80 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -96,13 +96,13 @@ void stk1160_buffer_done(struct stk1160 *dev)
{
struct stk1160_buffer *buf = dev->isoc_ctl.buf;
- buf->vb.v4l2_buf.sequence = dev->sequence++;
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- buf->vb.v4l2_buf.bytesused = buf->bytesused;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.sequence = dev->sequence++;
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused;
+ v4l2_get_timestamp(&buf->vb.timestamp);
- vb2_set_plane_payload(&buf->vb, 0, buf->bytesused);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dev->isoc_ctl.buf = NULL;
}
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 72cc8e8cbef7..1ed1cc43cdb2 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -23,7 +23,7 @@
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -77,7 +77,7 @@
/* Buffer for one video frame */
struct stk1160_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
void *mem;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index 74e5697d8678..e21c7aacecb6 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -42,7 +42,7 @@
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. default enabled.");
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 7c3a7c55d969..a5de46f04247 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -375,8 +375,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)
struct ttusb_dec *dec = priv;
dec->audio_filter->feed->cb.ts(data, 188, NULL, 0,
- &dec->audio_filter->feed->feed.ts,
- DMX_OK);
+ &dec->audio_filter->feed->feed.ts);
return 0;
}
@@ -386,8 +385,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data)
struct ttusb_dec *dec = priv;
dec->video_filter->feed->cb.ts(data, 188, NULL, 0,
- &dec->video_filter->feed->feed.ts,
- DMX_OK);
+ &dec->video_filter->feed->feed.ts);
return 0;
}
@@ -439,7 +437,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
if (output_pva) {
dec->video_filter->feed->cb.ts(pva, length, NULL, 0,
- &dec->video_filter->feed->feed.ts, DMX_OK);
+ &dec->video_filter->feed->feed.ts);
return;
}
@@ -500,7 +498,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
case 0x02: /* MainAudioStream */
if (output_pva) {
dec->audio_filter->feed->cb.ts(pva, length, NULL, 0,
- &dec->audio_filter->feed->feed.ts, DMX_OK);
+ &dec->audio_filter->feed->feed.ts);
return;
}
@@ -538,7 +536,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet,
if (filter)
filter->feed->cb.sec(&packet[2], length - 2, NULL, 0,
- &filter->filter, DMX_OK);
+ &filter->filter);
}
static void ttusb_dec_process_packet(struct ttusb_dec *dec)
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 08fb0f2da64d..e645c9df2d94 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -29,7 +29,7 @@
*/
#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include "usbtv.h"
@@ -306,7 +306,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
/* First available buffer. */
buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
- frame = vb2_plane_vaddr(&buf->vb, 0);
+ frame = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
/* Copy the chunk data. */
usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
@@ -314,17 +314,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
/* Last chunk in a frame, signalling an end */
if (odd && chunk_no == usbtv->n_chunks-1) {
- int size = vb2_plane_size(&buf->vb, 0);
+ int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
enum vb2_buffer_state state = usbtv->chunks_done ==
usbtv->n_chunks ?
VB2_BUF_STATE_DONE :
VB2_BUF_STATE_ERROR;
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- buf->vb.v4l2_buf.sequence = usbtv->sequence++;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- vb2_set_plane_payload(&buf->vb, 0, size);
- vb2_buffer_done(&buf->vb, state);
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.sequence = usbtv->sequence++;
+ v4l2_get_timestamp(&buf->vb.timestamp);
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
list_del(&buf->list);
}
@@ -422,7 +422,7 @@ static void usbtv_stop(struct usbtv *usbtv)
while (!list_empty(&usbtv->bufs)) {
struct usbtv_buf *buf = list_first_entry(&usbtv->bufs,
struct usbtv_buf, list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&buf->list);
}
spin_unlock_irqrestore(&usbtv->buflock, flags);
@@ -599,9 +599,10 @@ static struct v4l2_file_operations usbtv_fops = {
};
static int usbtv_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt, unsigned int *nbuffers,
+ const void *parg, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct usbtv *usbtv = vb2_get_drv_priv(vq);
unsigned size = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
@@ -617,8 +618,9 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
static void usbtv_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct usbtv *usbtv = vb2_get_drv_priv(vb->vb2_queue);
- struct usbtv_buf *buf = container_of(vb, struct usbtv_buf, vb);
+ struct usbtv_buf *buf = container_of(vbuf, struct usbtv_buf, vb);
unsigned long flags;
if (usbtv->udev == NULL) {
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 968119581fab..19cb8bf7c4e9 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -24,6 +24,7 @@
#include <linux/usb.h>
#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
/* Hardware. */
@@ -61,7 +62,7 @@ struct usbtv_norm_params {
/* A single videobuf2 frame buffer. */
struct usbtv_buf {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 4b5b3e8fb7d3..d11fd6ac2df0 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -32,6 +32,7 @@
#define DRIVER_DESC "USB Video Class driver"
unsigned int uvc_clock_param = CLOCK_MONOTONIC;
+unsigned int uvc_hw_timestamps_param;
unsigned int uvc_no_drop_param;
static unsigned int uvc_quirks_param = -1;
unsigned int uvc_trace_param;
@@ -2078,6 +2079,8 @@ static int uvc_clock_param_set(const char *val, struct kernel_param *kp)
module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
&uvc_clock_param, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
+module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps");
module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index f16b9b42689d..cfb868a48b5f 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -20,6 +20,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include "uvcvideo.h"
@@ -60,7 +61,7 @@ static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
queue);
list_del(&buf->queue);
buf->state = state;
- vb2_buffer_done(&buf->buf, vb2_state);
+ vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
}
}
@@ -68,10 +69,11 @@ static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
* videobuf2 queue operations
*/
-static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int uvc_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
+ const struct v4l2_format *fmt = parg;
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_streaming *stream = uvc_queue_to_stream(queue);
@@ -89,10 +91,11 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+ struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
- if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
return -EINVAL;
@@ -105,7 +108,7 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
buf->error = 0;
buf->mem = vb2_plane_vaddr(vb, 0);
buf->length = vb2_plane_size(vb, 0);
- if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
buf->bytesused = 0;
else
buf->bytesused = vb2_get_plane_payload(vb, 0);
@@ -115,8 +118,9 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+ struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
@@ -127,7 +131,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
* directly. The next QBUF call will fail with -ENODEV.
*/
buf->state = UVC_BUF_STATE_ERROR;
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&queue->irqlock, flags);
@@ -135,12 +139,13 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
struct uvc_streaming *stream = uvc_queue_to_stream(queue);
- struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+ struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
if (vb->state == VB2_BUF_STATE_DONE)
- uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
+ uvc_video_clock_update(stream, vbuf, buf);
}
static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -398,7 +403,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
buf->error = 0;
buf->state = UVC_BUF_STATE_QUEUED;
buf->bytesused = 0;
- vb2_set_plane_payload(&buf->buf, 0, 0);
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
return buf;
}
@@ -412,8 +417,8 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
spin_unlock_irqrestore(&queue->irqlock, flags);
buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
- vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
return nextbuf;
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index f839654ea436..2b276ab7764f 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -606,7 +606,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
* timestamp of the sliding window to 1s.
*/
void uvc_video_clock_update(struct uvc_streaming *stream,
- struct v4l2_buffer *v4l2_buf,
+ struct vb2_v4l2_buffer *vbuf,
struct uvc_buffer *buf)
{
struct uvc_clock *clock = &stream->clock;
@@ -623,6 +623,9 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
u32 rem;
u64 y;
+ if (!uvc_hw_timestamps_param)
+ return;
+
spin_lock_irqsave(&clock->lock, flags);
if (clock->count < clock->size)
@@ -696,14 +699,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
stream->dev->name,
sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
- v4l2_buf->timestamp.tv_sec,
- (unsigned long)v4l2_buf->timestamp.tv_usec,
+ vbuf->timestamp.tv_sec,
+ (unsigned long)vbuf->timestamp.tv_usec,
x1, first->host_sof, first->dev_sof,
x2, last->host_sof, last->dev_sof, y1, y2);
/* Update the V4L2 buffer. */
- v4l2_buf->timestamp.tv_sec = ts.tv_sec;
- v4l2_buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->timestamp.tv_sec = ts.tv_sec;
+ vbuf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
done:
spin_unlock_irqrestore(&stream->clock.lock, flags);
@@ -1029,10 +1032,10 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
uvc_video_get_ts(&ts);
- buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
- buf->buf.v4l2_buf.sequence = stream->sequence;
- buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
- buf->buf.v4l2_buf.timestamp.tv_usec =
+ buf->buf.field = V4L2_FIELD_NONE;
+ buf->buf.sequence = stream->sequence;
+ buf->buf.timestamp.tv_sec = ts.tv_sec;
+ buf->buf.timestamp.tv_usec =
ts.tv_nsec / NSEC_PER_USEC;
/* TODO: Handle PTS and SCR. */
@@ -1305,7 +1308,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
if (buf->bytesused == stream->queue.buf_used) {
stream->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_READY;
- buf->buf.v4l2_buf.sequence = ++stream->sequence;
+ buf->buf.sequence = ++stream->sequence;
uvc_queue_next_buffer(&stream->queue, buf);
stream->last_fid ^= UVC_STREAM_FID;
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 816dd1a0fd81..f0f2391e1b43 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -15,7 +15,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
/* --------------------------------------------------------------------------
* UVC constants
@@ -354,7 +354,7 @@ enum uvc_buffer_state {
};
struct uvc_buffer {
- struct vb2_buffer buf;
+ struct vb2_v4l2_buffer buf;
struct list_head queue;
enum uvc_buffer_state state;
@@ -593,6 +593,7 @@ extern unsigned int uvc_clock_param;
extern unsigned int uvc_no_drop_param;
extern unsigned int uvc_trace_param;
extern unsigned int uvc_timeout_param;
+extern unsigned int uvc_hw_timestamps_param;
#define uvc_trace(flag, msg...) \
do { \
@@ -673,7 +674,7 @@ extern int uvc_probe_video(struct uvc_streaming *stream,
extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
__u8 intfnum, __u8 cs, void *data, __u16 size);
void uvc_video_clock_update(struct uvc_streaming *stream,
- struct v4l2_buffer *v4l2_buf,
+ struct vb2_v4l2_buffer *vbuf,
struct uvc_buffer *buf);
/* Status */
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index d1dd440d9d9b..1dc8bba2b198 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -14,7 +14,7 @@ ifeq ($(CONFIG_OF),y)
videodev-objs += v4l2-of.o
endif
ifeq ($(CONFIG_TRACEPOINTS),y)
- videodev-objs += v4l2-trace.o
+ videodev-objs += vb2-trace.o v4l2-trace.o
endif
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
@@ -33,7 +33,7 @@ obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
-obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o
obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index af635430524e..327e83ac2469 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -147,6 +147,20 @@ static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp,
return 0;
}
+static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
+ return -EFAULT;
+ return 0;
+}
+
struct v4l2_format32 {
__u32 type; /* enum v4l2_buf_type */
union {
@@ -155,6 +169,7 @@ struct v4l2_format32 {
struct v4l2_window32 win;
struct v4l2_vbi_format vbi;
struct v4l2_sliced_vbi_format sliced;
+ struct v4l2_sdr_format sdr;
__u8 raw_data[200]; /* user-defined */
} fmt;
};
@@ -198,8 +213,11 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
default:
- printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
+ pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
kp->type);
return -EINVAL;
}
@@ -242,8 +260,11 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
default:
- printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
+ pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
kp->type);
return -EINVAL;
}
@@ -266,7 +287,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_
struct v4l2_standard32 {
__u32 index;
- __u32 id[2]; /* __u64 would get the alignment wrong */
+ compat_u64 id;
__u8 name[24];
struct v4l2_fract frameperiod; /* Frames, not fields */
__u32 framelines;
@@ -286,7 +307,7 @@ static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32
{
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
put_user(kp->index, &up->index) ||
- copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
+ put_user(kp->id, &up->id) ||
copy_to_user(up->name, kp->name, 24) ||
copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
put_user(kp->framelines, &up->framelines) ||
@@ -587,10 +608,10 @@ struct v4l2_input32 {
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
__u32 tuner; /* Associated tuner */
- v4l2_std_id std;
+ compat_u64 std;
__u32 status;
__u32 reserved[4];
-} __attribute__ ((packed));
+};
/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
Otherwise it is identical to the 32-bit version. */
@@ -609,11 +630,11 @@ static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __
}
struct v4l2_ext_controls32 {
- __u32 ctrl_class;
- __u32 count;
- __u32 error_idx;
- __u32 reserved[2];
- compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
+ __u32 ctrl_class;
+ __u32 count;
+ __u32 error_idx;
+ __u32 reserved[2];
+ compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};
struct v4l2_ext_control32 {
@@ -655,7 +676,8 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
get_user(kp->ctrl_class, &up->ctrl_class) ||
get_user(kp->count, &up->count) ||
get_user(kp->error_idx, &up->error_idx) ||
- copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ copy_from_user(kp->reserved, up->reserved,
+ sizeof(kp->reserved)))
return -EFAULT;
n = kp->count;
if (n == 0) {
@@ -738,6 +760,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
struct v4l2_event32 {
__u32 type;
union {
+ compat_s64 value64;
__u8 data[64];
} u;
__u32 pending;
@@ -1033,8 +1056,8 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
ret = vdev->fops->compat_ioctl32(file, cmd, arg);
if (ret == -ENOIOCTLCMD)
- pr_warn("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
- _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
+ pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index b6b7dcc1b77d..4a1d9fdd14bb 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -888,6 +888,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain";
case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
@@ -1161,6 +1162,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_PILOT_TONE_FREQUENCY:
case V4L2_CID_TUNE_POWER_LEVEL:
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_RF_GAIN:
case V4L2_CID_RF_TUNER_LNA_GAIN:
case V4L2_CID_RF_TUNER_MIXER_GAIN:
case V4L2_CID_RF_TUNER_IF_GAIN:
@@ -2498,7 +2500,7 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
/* We found a control with the given ID, so just get
the next valid one in the list. */
list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
- is_compound =
+ is_compound = ref->ctrl->is_array ||
ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
if (id < ref->ctrl->id &&
(is_compound & mask) == match)
@@ -2512,7 +2514,7 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
is one, otherwise the first 'if' above would have
been true. */
list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- is_compound =
+ is_compound = ref->ctrl->is_array ||
ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
if (id < ref->ctrl->id &&
(is_compound & mask) == match)
@@ -2884,7 +2886,7 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
* cur_to_user() calls below would need to be modified not to access
* userspace memory when called from get_ctrl().
*/
- if (!ctrl->is_int)
+ if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64)
return -EINVAL;
if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
@@ -2942,9 +2944,9 @@ s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
- c.value = 0;
+ c.value64 = 0;
get_ctrl(ctrl, &c);
- return c.value;
+ return c.value64;
}
EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
@@ -3043,7 +3045,7 @@ static void update_from_auto_cluster(struct v4l2_ctrl *master)
{
int i;
- for (i = 0; i < master->ncontrols; i++)
+ for (i = 1; i < master->ncontrols; i++)
cur_to_new(master->cluster[i]);
if (!call_op(master, g_volatile_ctrl))
for (i = 1; i < master->ncontrols; i++)
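The v4l2_ctrl_g_ctrl_int64() hunk above fixes a truncation: struct v4l2_ext_control keeps its 32-bit and 64-bit values in a union, so a V4L2_CTRL_TYPE_INTEGER64 control has to be read through .value64. A small illustrative sketch, not taken from the patch (read_int64_example is a hypothetical name):

	#include <linux/videodev2.h>

	/* Illustrative only: why .value64 must be used for 64-bit controls. */
	static long long read_int64_example(void)
	{
		struct v4l2_ext_control c = { .id = V4L2_CID_PIXEL_RATE };

		c.value64 = 0x100000000LL;	/* 2^32 does not fit in the 32-bit .value */
		return c.value64;		/* reading c.value here would yield 0 */
	}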
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 71a1b93b0790..6b1eaeddbdb3 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -637,8 +637,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
- } else if (is_sdr) {
- /* SDR specific ioctls */
+ } else if (is_sdr && is_rx) {
+ /* SDR receiver specific ioctls */
if (ops->vidioc_enum_fmt_sdr_cap)
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if (ops->vidioc_g_fmt_sdr_cap)
@@ -647,6 +647,16 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if (ops->vidioc_try_fmt_sdr_cap)
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ } else if (is_sdr && is_tx) {
+ /* SDR transmitter specific ioctls */
+ if (ops->vidioc_enum_fmt_sdr_out)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if (ops->vidioc_g_fmt_sdr_out)
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if (ops->vidioc_s_fmt_sdr_out)
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if (ops->vidioc_try_fmt_sdr_out)
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
}
if (is_vid || is_vbi || is_sdr) {
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 4a384fc765b8..7486af2c8ae4 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -26,7 +26,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <trace/events/v4l2.h>
@@ -153,6 +153,7 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
[V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
+ [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -326,6 +327,7 @@ static void v4l_print_format(const void *arg, bool write_only)
sliced->service_lines[1][i]);
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
sdr = &p->fmt.sdr;
pr_cont(", pixelformat=%c%c%c%c\n",
(sdr->pixelformat >> 0) & 0xff,
@@ -974,6 +976,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
return 0;
break;
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out)
+ return 0;
+ break;
default:
break;
}
@@ -1324,6 +1330,11 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
break;
ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
break;
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out))
+ break;
+ ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
+ break;
}
if (ret == 0)
v4l_fill_fmtdesc(p);
@@ -1418,6 +1429,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap))
break;
return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out))
+ break;
+ return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
}
return -EINVAL;
}
@@ -1497,6 +1512,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
}
return -EINVAL;
}
@@ -1576,6 +1596,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
}
return -EINVAL;
}
@@ -1621,15 +1646,31 @@ static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
int err;
+ if (vfd->vfl_type == VFL_TYPE_RADIO)
+ p->type = V4L2_TUNER_RADIO;
+
err = ops->vidioc_g_modulator(file, fh, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
+static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_modulator *p = arg;
+
+ if (vfd->vfl_type == VFL_TYPE_RADIO)
+ p->type = V4L2_TUNER_RADIO;
+
+ return ops->vidioc_s_modulator(file, fh, p);
+}
+
static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1637,7 +1678,7 @@ static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
struct v4l2_frequency *p = arg;
if (vfd->vfl_type == VFL_TYPE_SDR)
- p->type = V4L2_TUNER_ADC;
+ p->type = V4L2_TUNER_SDR;
else
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
@@ -1652,7 +1693,7 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
enum v4l2_tuner_type type;
if (vfd->vfl_type == VFL_TYPE_SDR) {
- if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
return -EINVAL;
} else {
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
@@ -2277,7 +2318,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
int err;
if (vfd->vfl_type == VFL_TYPE_SDR) {
- if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
return -EINVAL;
type = p->type;
} else {
@@ -2416,7 +2457,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0),
IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
- IOCTL_INFO_STD(VIDIOC_S_MODULATOR, vidioc_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
IOCTL_INFO_FNC(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index ec3ad4eb0c57..61d56c940f80 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -17,7 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
@@ -583,32 +583,25 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
goto end;
}
- if (m2m_ctx->m2m_dev->m2m_ops->unlock)
- m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
- else if (m2m_ctx->q_lock)
- mutex_unlock(m2m_ctx->q_lock);
-
+ spin_lock_irqsave(&src_q->done_lock, flags);
if (list_empty(&src_q->done_list))
poll_wait(file, &src_q->done_wq, wait);
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+
+ spin_lock_irqsave(&dst_q->done_lock, flags);
if (list_empty(&dst_q->done_list)) {
/*
* If the last buffer was dequeued from the capture queue,
* return immediately. DQBUF will return -EPIPE.
*/
- if (dst_q->last_buffer_dequeued)
+ if (dst_q->last_buffer_dequeued) {
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
return rc | POLLIN | POLLRDNORM;
+ }
poll_wait(file, &dst_q->done_wq, wait);
}
-
- if (m2m_ctx->m2m_dev->m2m_ops->lock)
- m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
- else if (m2m_ctx->q_lock) {
- if (mutex_lock_interruptible(m2m_ctx->q_lock)) {
- rc |= POLLERR;
- goto end;
- }
- }
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
@@ -773,13 +766,15 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
*
* Call from buf_queue(), videobuf_queue_ops callback.
*/
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
{
- struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
+ struct v4l2_m2m_buffer *b = container_of(vbuf,
+ struct v4l2_m2m_buffer, vb);
struct v4l2_m2m_queue_ctx *q_ctx;
unsigned long flags;
- q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
+ q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
if (!q_ctx)
return;
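With the new v4l2_m2m_buf_queue() prototype, mem2mem drivers convert to struct vb2_v4l2_buffer before queueing. A minimal sketch of an adapted buf_queue callback; foo_ctx and foo_m2m_buf_queue are hypothetical names and not part of this patch:

	#include <media/videobuf2-v4l2.h>
	#include <media/v4l2-mem2mem.h>

	struct foo_ctx {				/* hypothetical per-open context */
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static void foo_m2m_buf_queue(struct vb2_buffer *vb)
	{
		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
		struct foo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

		/* hand the v4l2-flavoured buffer to the m2m framework */
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
	}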
diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c
index ae10b0248c8e..7416010542c1 100644
--- a/drivers/media/v4l2-core/v4l2-trace.c
+++ b/drivers/media/v4l2-core/v4l2-trace.c
@@ -1,11 +1,11 @@
#include <media/v4l2-common.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#define CREATE_TRACE_POINTS
#include <trace/events/v4l2.h>
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf);
diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/v4l2-core/vb2-trace.c
new file mode 100644
index 000000000000..61e74f5936b3
--- /dev/null
+++ b/drivers/media/v4l2-core/vb2-trace.c
@@ -0,0 +1,9 @@
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/vb2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 926836d1813a..6c02989ee33f 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -576,7 +576,8 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
|| q->type == V4L2_BUF_TYPE_VBI_OUTPUT
- || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
buf->size = b->bytesused;
buf->field = b->field;
buf->ts = b->timestamp;
@@ -1154,6 +1155,7 @@ unsigned int videobuf_poll_stream(struct file *file,
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
case V4L2_BUF_TYPE_VBI_OUTPUT:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
rc = POLLOUT | POLLWRNORM;
break;
default:
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 4f59b7ec05d0..33bdd81065e8 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1,5 +1,5 @@
/*
- * videobuf2-core.c - V4L2 driver helper framework
+ * videobuf2-core.c - video buffer 2 core framework
*
* Copyright (C) 2010 Samsung Electronics
*
@@ -24,164 +24,15 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>
-#include <trace/events/v4l2.h>
+#include <trace/events/vb2.h>
-static int debug;
-module_param(debug, int, 0644);
+#include "videobuf2-internal.h"
-#define dprintk(level, fmt, arg...) \
- do { \
- if (debug >= level) \
- pr_info("vb2: %s: " fmt, __func__, ## arg); \
- } while (0)
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-
-/*
- * If advanced debugging is on, then count how often each op is called
- * successfully, which can either be per-buffer or per-queue.
- *
- * This makes it easy to check that the 'init' and 'cleanup'
- * (and variations thereof) stay balanced.
- */
-
-#define log_memop(vb, op) \
- dprintk(2, "call_memop(%p, %d, %s)%s\n", \
- (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
- (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
-
-#define call_memop(vb, op, args...) \
-({ \
- struct vb2_queue *_q = (vb)->vb2_queue; \
- int err; \
- \
- log_memop(vb, op); \
- err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
- if (!err) \
- (vb)->cnt_mem_ ## op++; \
- err; \
-})
-
-#define call_ptr_memop(vb, op, args...) \
-({ \
- struct vb2_queue *_q = (vb)->vb2_queue; \
- void *ptr; \
- \
- log_memop(vb, op); \
- ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
- if (!IS_ERR_OR_NULL(ptr)) \
- (vb)->cnt_mem_ ## op++; \
- ptr; \
-})
-
-#define call_void_memop(vb, op, args...) \
-({ \
- struct vb2_queue *_q = (vb)->vb2_queue; \
- \
- log_memop(vb, op); \
- if (_q->mem_ops->op) \
- _q->mem_ops->op(args); \
- (vb)->cnt_mem_ ## op++; \
-})
-
-#define log_qop(q, op) \
- dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
- (q)->ops->op ? "" : " (nop)")
-
-#define call_qop(q, op, args...) \
-({ \
- int err; \
- \
- log_qop(q, op); \
- err = (q)->ops->op ? (q)->ops->op(args) : 0; \
- if (!err) \
- (q)->cnt_ ## op++; \
- err; \
-})
-
-#define call_void_qop(q, op, args...) \
-({ \
- log_qop(q, op); \
- if ((q)->ops->op) \
- (q)->ops->op(args); \
- (q)->cnt_ ## op++; \
-})
-
-#define log_vb_qop(vb, op, args...) \
- dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
- (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
- (vb)->vb2_queue->ops->op ? "" : " (nop)")
-
-#define call_vb_qop(vb, op, args...) \
-({ \
- int err; \
- \
- log_vb_qop(vb, op); \
- err = (vb)->vb2_queue->ops->op ? \
- (vb)->vb2_queue->ops->op(args) : 0; \
- if (!err) \
- (vb)->cnt_ ## op++; \
- err; \
-})
-
-#define call_void_vb_qop(vb, op, args...) \
-({ \
- log_vb_qop(vb, op); \
- if ((vb)->vb2_queue->ops->op) \
- (vb)->vb2_queue->ops->op(args); \
- (vb)->cnt_ ## op++; \
-})
-
-#else
-
-#define call_memop(vb, op, args...) \
- ((vb)->vb2_queue->mem_ops->op ? \
- (vb)->vb2_queue->mem_ops->op(args) : 0)
-
-#define call_ptr_memop(vb, op, args...) \
- ((vb)->vb2_queue->mem_ops->op ? \
- (vb)->vb2_queue->mem_ops->op(args) : NULL)
-
-#define call_void_memop(vb, op, args...) \
- do { \
- if ((vb)->vb2_queue->mem_ops->op) \
- (vb)->vb2_queue->mem_ops->op(args); \
- } while (0)
-
-#define call_qop(q, op, args...) \
- ((q)->ops->op ? (q)->ops->op(args) : 0)
-
-#define call_void_qop(q, op, args...) \
- do { \
- if ((q)->ops->op) \
- (q)->ops->op(args); \
- } while (0)
-
-#define call_vb_qop(vb, op, args...) \
- ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
-
-#define call_void_vb_qop(vb, op, args...) \
- do { \
- if ((vb)->vb2_queue->ops->op) \
- (vb)->vb2_queue->ops->op(args); \
- } while (0)
-
-#endif
-
-/* Flags that are set by the vb2 core */
-#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
- V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
- V4L2_BUF_FLAG_PREPARED | \
- V4L2_BUF_FLAG_TIMESTAMP_MASK)
-/* Output buffer flags that should be passed on to the driver */
-#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
- V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+int vb2_debug;
+EXPORT_SYMBOL_GPL(vb2_debug);
+module_param_named(debug, vb2_debug, int, 0644);
static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);
@@ -193,7 +44,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
enum dma_data_direction dma_dir =
- V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
void *mem_priv;
int plane;
@@ -211,7 +62,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
/* Associate allocator private data with this plane */
vb->planes[plane].mem_priv = mem_priv;
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ vb->planes[plane].length = q->plane_sizes[plane];
}
return 0;
@@ -235,8 +86,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane) {
call_void_memop(vb, put, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
- dprintk(3, "freed plane %d of buffer %d\n", plane,
- vb->v4l2_buf.index);
+ dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
}
}
@@ -269,7 +119,9 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
call_void_memop(vb, detach_dmabuf, p->mem_priv);
dma_buf_put(p->dbuf);
- memset(p, 0, sizeof(*p));
+ p->mem_priv = NULL;
+ p->dbuf = NULL;
+ p->dbuf_mapped = 0;
}
/**
@@ -299,7 +151,7 @@ static void __setup_lengths(struct vb2_queue *q, unsigned int n)
continue;
for (plane = 0; plane < vb->num_planes; ++plane)
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ vb->planes[plane].length = q->plane_sizes[plane];
}
}
@@ -314,10 +166,10 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
unsigned long off;
if (q->num_buffers) {
- struct v4l2_plane *p;
+ struct vb2_plane *p;
vb = q->bufs[q->num_buffers - 1];
- p = &vb->v4l2_planes[vb->num_planes - 1];
- off = PAGE_ALIGN(p->m.mem_offset + p->length);
+ p = &vb->planes[vb->num_planes - 1];
+ off = PAGE_ALIGN(p->m.offset + p->length);
} else {
off = 0;
}
@@ -328,12 +180,12 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
continue;
for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->v4l2_planes[plane].m.mem_offset = off;
+ vb->planes[plane].m.offset = off;
dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
buffer, plane, off);
- off += vb->v4l2_planes[plane].length;
+ off += vb->planes[plane].length;
off = PAGE_ALIGN(off);
}
}
@@ -346,7 +198,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
*
* Returns the number of buffers successfully allocated.
*/
-static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
+static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
unsigned int num_buffers, unsigned int num_planes)
{
unsigned int buffer;
@@ -361,19 +213,15 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
break;
}
- /* Length stores number of planes for multiplanar buffers */
- if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
- vb->v4l2_buf.length = num_planes;
-
vb->state = VB2_BUF_STATE_DEQUEUED;
vb->vb2_queue = q;
vb->num_planes = num_planes;
- vb->v4l2_buf.index = q->num_buffers + buffer;
- vb->v4l2_buf.type = q->type;
- vb->v4l2_buf.memory = memory;
+ vb->index = q->num_buffers + buffer;
+ vb->type = q->type;
+ vb->memory = memory;
/* Allocate video buffer memory for the MMAP type */
- if (memory == V4L2_MEMORY_MMAP) {
+ if (memory == VB2_MEMORY_MMAP) {
ret = __vb2_buf_mem_alloc(vb);
if (ret) {
dprintk(1, "failed allocating memory for "
@@ -400,7 +248,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
}
__setup_lengths(q, buffer);
- if (memory == V4L2_MEMORY_MMAP)
+ if (memory == VB2_MEMORY_MMAP)
__setup_offsets(q, buffer);
dprintk(1, "allocated %d buffers, %d plane(s) each\n",
@@ -424,9 +272,9 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
continue;
/* Free MMAP buffers or release USERPTR buffers */
- if (q->memory == V4L2_MEMORY_MMAP)
+ if (q->memory == VB2_MEMORY_MMAP)
__vb2_buf_mem_free(vb);
- else if (q->memory == V4L2_MEMORY_DMABUF)
+ else if (q->memory == VB2_MEMORY_DMABUF)
__vb2_buf_dmabuf_put(vb);
else
__vb2_buf_userptr_put(vb);
@@ -482,7 +330,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
q->cnt_wait_prepare != q->cnt_wait_finish;
- if (unbalanced || debug) {
+ if (unbalanced || vb2_debug) {
pr_info("vb2: counters for queue %p:%s\n", q,
unbalanced ? " UNBALANCED!" : "");
pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
@@ -508,7 +356,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
vb->cnt_buf_prepare != vb->cnt_buf_finish ||
vb->cnt_buf_init != vb->cnt_buf_cleanup;
- if (unbalanced || debug) {
+ if (unbalanced || vb2_debug) {
pr_info("vb2: counters for queue %p, buffer %d:%s\n",
q, buffer, unbalanced ? " UNBALANCED!" : "");
pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
@@ -550,76 +398,10 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
}
/**
- * __verify_planes_array() - verify that the planes array passed in struct
- * v4l2_buffer from userspace can be safely used
- */
-static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
- return 0;
-
- /* Is memory for copying plane information present? */
- if (NULL == b->m.planes) {
- dprintk(1, "multi-planar buffer passed but "
- "planes array not provided\n");
- return -EINVAL;
- }
-
- if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
- dprintk(1, "incorrect planes array length, "
- "expected %d, got %d\n", vb->num_planes, b->length);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * __verify_length() - Verify that the bytesused value for each plane fits in
- * the plane length and that the data offset doesn't exceed the bytesused value.
- */
-static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- unsigned int length;
- unsigned int bytesused;
- unsigned int plane;
-
- if (!V4L2_TYPE_IS_OUTPUT(b->type))
- return 0;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- length = (b->memory == V4L2_MEMORY_USERPTR ||
- b->memory == V4L2_MEMORY_DMABUF)
- ? b->m.planes[plane].length
- : vb->v4l2_planes[plane].length;
- bytesused = b->m.planes[plane].bytesused
- ? b->m.planes[plane].bytesused : length;
-
- if (b->m.planes[plane].bytesused > length)
- return -EINVAL;
-
- if (b->m.planes[plane].data_offset > 0 &&
- b->m.planes[plane].data_offset >= bytesused)
- return -EINVAL;
- }
- } else {
- length = (b->memory == V4L2_MEMORY_USERPTR)
- ? b->length : vb->v4l2_planes[0].length;
- bytesused = b->bytesused ? b->bytesused : length;
-
- if (b->bytesused > length)
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * __buffer_in_use() - return true if the buffer is in use and
+ * vb2_buffer_in_use() - return true if the buffer is in use and
* the queue cannot be freed (by the means of REQBUFS(0)) call
*/
-static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -635,6 +417,7 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
}
return false;
}
+EXPORT_SYMBOL(vb2_buffer_in_use);
/**
* __buffers_in_use() - return true if any buffers on the queue are in use and
@@ -644,122 +427,30 @@ static bool __buffers_in_use(struct vb2_queue *q)
{
unsigned int buffer;
for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- if (__buffer_in_use(q, q->bufs[buffer]))
+ if (vb2_buffer_in_use(q, q->bufs[buffer]))
return true;
}
return false;
}
/**
- * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
- * returned to userspace
- */
-static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
-{
- struct vb2_queue *q = vb->vb2_queue;
-
- /* Copy back data such as timestamp, flags, etc. */
- memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
- b->reserved2 = vb->v4l2_buf.reserved2;
- b->reserved = vb->v4l2_buf.reserved;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
- /*
- * Fill in plane-related data if userspace provided an array
- * for it. The caller has already verified memory and size.
- */
- b->length = vb->num_planes;
- memcpy(b->m.planes, vb->v4l2_planes,
- b->length * sizeof(struct v4l2_plane));
- } else {
- /*
- * We use length and offset in v4l2_planes array even for
- * single-planar buffers, but userspace does not.
- */
- b->length = vb->v4l2_planes[0].length;
- b->bytesused = vb->v4l2_planes[0].bytesused;
- if (q->memory == V4L2_MEMORY_MMAP)
- b->m.offset = vb->v4l2_planes[0].m.mem_offset;
- else if (q->memory == V4L2_MEMORY_USERPTR)
- b->m.userptr = vb->v4l2_planes[0].m.userptr;
- else if (q->memory == V4L2_MEMORY_DMABUF)
- b->m.fd = vb->v4l2_planes[0].m.fd;
- }
-
- /*
- * Clear any buffer state related flags.
- */
- b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
- b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
- if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
- V4L2_BUF_FLAG_TIMESTAMP_COPY) {
- /*
- * For non-COPY timestamps, drop timestamp source bits
- * and obtain the timestamp source from the queue.
- */
- b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- }
-
- switch (vb->state) {
- case VB2_BUF_STATE_QUEUED:
- case VB2_BUF_STATE_ACTIVE:
- b->flags |= V4L2_BUF_FLAG_QUEUED;
- break;
- case VB2_BUF_STATE_ERROR:
- b->flags |= V4L2_BUF_FLAG_ERROR;
- /* fall through */
- case VB2_BUF_STATE_DONE:
- b->flags |= V4L2_BUF_FLAG_DONE;
- break;
- case VB2_BUF_STATE_PREPARED:
- b->flags |= V4L2_BUF_FLAG_PREPARED;
- break;
- case VB2_BUF_STATE_PREPARING:
- case VB2_BUF_STATE_DEQUEUED:
- case VB2_BUF_STATE_REQUEUEING:
- /* nothing */
- break;
- }
-
- if (__buffer_in_use(q, vb))
- b->flags |= V4L2_BUF_FLAG_MAPPED;
-}
-
-/**
- * vb2_querybuf() - query video buffer information
+ * vb2_core_querybuf() - query video buffer information
* @q: videobuf queue
- * @b: buffer struct passed from userspace to vidioc_querybuf handler
- * in driver
+ * @index: id number of the buffer
+ * @pb: buffer struct passed from userspace
*
* Should be called from vidioc_querybuf ioctl handler in driver.
- * This function will verify the passed v4l2_buffer structure and fill the
- * relevant information for the userspace.
+ * The passed buffer should have been verified.
+ * This function fills the relevant information for the userspace.
*
* The return values from this function are intended to be directly returned
* from vidioc_querybuf handler in driver.
*/
-int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+int vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
{
- struct vb2_buffer *vb;
- int ret;
-
- if (b->type != q->type) {
- dprintk(1, "wrong buffer type\n");
- return -EINVAL;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "buffer index out of range\n");
- return -EINVAL;
- }
- vb = q->bufs[b->index];
- ret = __verify_planes_array(vb, b);
- if (!ret)
- __fill_v4l2_buffer(vb, b);
- return ret;
+ return call_bufop(q, fill_user_buffer, q->bufs[index], pb);
}
-EXPORT_SYMBOL(vb2_querybuf);
+EXPORT_SYMBOL_GPL(vb2_core_querybuf);
/**
* __verify_userptr_ops() - verify that all memory operations required for
@@ -802,14 +493,14 @@ static int __verify_dmabuf_ops(struct vb2_queue *q)
}
/**
- * __verify_memory_type() - Check whether the memory type and buffer type
+ * vb2_verify_memory_type() - Check whether the memory type and buffer type
* passed to a buffer operation are compatible with the queue.
*/
-static int __verify_memory_type(struct vb2_queue *q,
- enum v4l2_memory memory, enum v4l2_buf_type type)
+int vb2_verify_memory_type(struct vb2_queue *q,
+ enum vb2_memory memory, unsigned int type)
{
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
- memory != V4L2_MEMORY_DMABUF) {
+ if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
+ memory != VB2_MEMORY_DMABUF) {
dprintk(1, "unsupported memory type\n");
return -EINVAL;
}
@@ -823,17 +514,17 @@ static int __verify_memory_type(struct vb2_queue *q,
* Make sure all the required memory ops for given memory type
* are available.
*/
- if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
dprintk(1, "MMAP for current setup unsupported\n");
return -EINVAL;
}
- if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
dprintk(1, "USERPTR for current setup unsupported\n");
return -EINVAL;
}
- if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
dprintk(1, "DMABUF for current setup unsupported\n");
return -EINVAL;
}
@@ -849,11 +540,13 @@ static int __verify_memory_type(struct vb2_queue *q,
}
return 0;
}
+EXPORT_SYMBOL(vb2_verify_memory_type);
/**
- * __reqbufs() - Initiate streaming
+ * vb2_core_reqbufs() - Initiate streaming
* @q: videobuf2 queue
- * @req: struct passed from userspace to vidioc_reqbufs handler in driver
+ * @memory: memory type
+ * @count: requested buffer count
*
* Should be called from vidioc_reqbufs ioctl handler of a driver.
* This function:
@@ -873,7 +566,8 @@ static int __verify_memory_type(struct vb2_queue *q,
* The return values from this function are intended to be directly returned
* from vidioc_reqbufs handler in driver.
*/
-static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int *count)
{
unsigned int num_buffers, allocated_buffers, num_planes = 0;
int ret;
@@ -883,13 +577,13 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EBUSY;
}
- if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
+ if (*count == 0 || q->num_buffers != 0 || q->memory != memory) {
/*
* We already have buffers allocated, so first check if they
* are not in use and can be freed.
*/
mutex_lock(&q->mmap_lock);
- if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
+ if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
mutex_unlock(&q->mmap_lock);
dprintk(1, "memory in use, cannot free\n");
return -EBUSY;
@@ -910,18 +604,18 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* In case of REQBUFS(0) return immediately without calling
* driver's queue_setup() callback and allocating resources.
*/
- if (req->count == 0)
+ if (*count == 0)
return 0;
}
/*
* Make sure the requested values and current defaults are sane.
*/
- num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
+ num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME);
num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
- q->memory = req->memory;
+ q->memory = memory;
/*
* Ask the driver how many buffers and planes per buffer it requires.
@@ -933,7 +627,8 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return ret;
/* Finally, allocate buffers and video memory */
- allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
+ allocated_buffers =
+ __vb2_queue_alloc(q, memory, num_buffers, num_planes);
if (allocated_buffers == 0) {
dprintk(1, "memory allocation failed\n");
return -ENOMEM;
@@ -982,31 +677,19 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* Return the number of successfully allocated buffers
* to the userspace.
*/
- req->count = allocated_buffers;
- q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ *count = allocated_buffers;
+ q->waiting_for_buffers = !q->is_output;
return 0;
}
+EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
/**
- * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
- * type values.
+ * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
* @q: videobuf2 queue
- * @req: struct passed from userspace to vidioc_reqbufs handler in driver
- */
-int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-{
- int ret = __verify_memory_type(q, req->memory, req->type);
-
- return ret ? ret : __reqbufs(q, req);
-}
-EXPORT_SYMBOL_GPL(vb2_reqbufs);
-
-/**
- * __create_bufs() - Allocate buffers and any required auxiliary structs
- * @q: videobuf2 queue
- * @create: creation parameters, passed from userspace to vidioc_create_bufs
- * handler in driver
+ * @memory: memory type
+ * @count: requested buffer count
+ * @parg: parameter passed to device driver
*
* Should be called from vidioc_create_bufs ioctl handler of a driver.
* This function:
@@ -1017,12 +700,13 @@ EXPORT_SYMBOL_GPL(vb2_reqbufs);
* The return values from this function are intended to be directly returned
* from vidioc_create_bufs handler in driver.
*/
-static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int *count, const void *parg)
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
int ret;
- if (q->num_buffers == VIDEO_MAX_FRAME) {
+ if (q->num_buffers == VB2_MAX_FRAME) {
dprintk(1, "maximum number of buffers already allocated\n");
return -ENOBUFS;
}
@@ -1030,23 +714,23 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
if (!q->num_buffers) {
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
- q->memory = create->memory;
- q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ q->memory = memory;
+ q->waiting_for_buffers = !q->is_output;
}
- num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
+ num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
/*
* Ask the driver, whether the requested number of buffers, planes per
* buffer and their sizes are acceptable
*/
- ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+ ret = call_qop(q, queue_setup, q, parg, &num_buffers,
&num_planes, q->plane_sizes, q->alloc_ctx);
if (ret)
return ret;
/* Finally, allocate buffers and video memory */
- allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
+ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
num_planes);
if (allocated_buffers == 0) {
dprintk(1, "memory allocation failed\n");
@@ -1063,7 +747,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
* q->num_buffers contains the total number of buffers, that the
* queue driver has set up
*/
- ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+ ret = call_qop(q, queue_setup, q, parg, &num_buffers,
&num_planes, q->plane_sizes, q->alloc_ctx);
if (!ret && allocated_buffers < num_buffers)
@@ -1093,28 +777,11 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
* Return the number of successfully allocated buffers
* to the userspace.
*/
- create->count = allocated_buffers;
+ *count = allocated_buffers;
return 0;
}
-
-/**
- * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
- * memory and type values.
- * @q: videobuf2 queue
- * @create: creation parameters, passed from userspace to vidioc_create_bufs
- * handler in driver
- */
-int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-{
- int ret = __verify_memory_type(q, create->memory, create->format.type);
-
- create->index = q->num_buffers;
- if (create->count == 0)
- return ret != -EBUSY ? ret : 0;
- return ret ? ret : __create_bufs(q, create);
-}
-EXPORT_SYMBOL_GPL(vb2_create_bufs);
+EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
/**
* vb2_plane_vaddr() - Return a kernel virtual address of a given plane
@@ -1197,7 +864,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
vb->cnt_buf_done++;
#endif
dprintk(4, "done processing on buffer %d, state: %d\n",
- vb->v4l2_buf.index, state);
+ vb->index, state);
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
@@ -1256,182 +923,41 @@ void vb2_discard_done(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_discard_done);
-static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
-{
- static bool check_once;
-
- if (check_once)
- return;
-
- check_once = true;
- WARN_ON(1);
-
- pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
- if (vb->vb2_queue->allow_zero_bytesused)
- pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
- else
- pr_warn("use the actual size instead.\n");
-}
-
-/**
- * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
- * v4l2_buffer by the userspace. The caller has already verified that struct
- * v4l2_buffer has a valid number of planes.
- */
-static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
- struct v4l2_plane *v4l2_planes)
-{
- unsigned int plane;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
- if (b->memory == V4L2_MEMORY_USERPTR) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- v4l2_planes[plane].m.userptr =
- b->m.planes[plane].m.userptr;
- v4l2_planes[plane].length =
- b->m.planes[plane].length;
- }
- }
- if (b->memory == V4L2_MEMORY_DMABUF) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- v4l2_planes[plane].m.fd =
- b->m.planes[plane].m.fd;
- v4l2_planes[plane].length =
- b->m.planes[plane].length;
- }
- }
-
- /* Fill in driver-provided information for OUTPUT types */
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Will have to go up to b->length when API starts
- * accepting variable number of planes.
- *
- * If bytesused == 0 for the output buffer, then fall
- * back to the full buffer size. In that case
- * userspace clearly never bothered to set it and
- * it's a safe assumption that they really meant to
- * use the full plane sizes.
- *
- * Some drivers, e.g. old codec drivers, use bytesused == 0
- * as a way to indicate that streaming is finished.
- * In that case, the driver should use the
- * allow_zero_bytesused flag to keep old userspace
- * applications working.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- struct v4l2_plane *pdst = &v4l2_planes[plane];
- struct v4l2_plane *psrc = &b->m.planes[plane];
-
- if (psrc->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
- if (vb->vb2_queue->allow_zero_bytesused)
- pdst->bytesused = psrc->bytesused;
- else
- pdst->bytesused = psrc->bytesused ?
- psrc->bytesused : pdst->length;
- pdst->data_offset = psrc->data_offset;
- }
- }
- } else {
- /*
- * Single-planar buffers do not use planes array,
- * so fill in relevant v4l2_buffer struct fields instead.
- * In videobuf we use our internal V4l2_planes struct for
- * single-planar buffers as well, for simplicity.
- *
- * If bytesused == 0 for the output buffer, then fall back
- * to the full buffer size as that's a sensible default.
- *
- * Some drivers, e.g. old codec drivers, use bytesused == 0 as
- * a way to indicate that streaming is finished. In that case,
- * the driver should use the allow_zero_bytesused flag to keep
- * old userspace applications working.
- */
- if (b->memory == V4L2_MEMORY_USERPTR) {
- v4l2_planes[0].m.userptr = b->m.userptr;
- v4l2_planes[0].length = b->length;
- }
-
- if (b->memory == V4L2_MEMORY_DMABUF) {
- v4l2_planes[0].m.fd = b->m.fd;
- v4l2_planes[0].length = b->length;
- }
-
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- if (b->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
- if (vb->vb2_queue->allow_zero_bytesused)
- v4l2_planes[0].bytesused = b->bytesused;
- else
- v4l2_planes[0].bytesused = b->bytesused ?
- b->bytesused : v4l2_planes[0].length;
- } else
- v4l2_planes[0].bytesused = 0;
-
- }
-
- /* Zero flags that the vb2 core handles */
- vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
- if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
- V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Non-COPY timestamps and non-OUTPUT queues will get
- * their timestamp and timestamp source flags from the
- * queue.
- */
- vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- }
-
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * For output buffers mask out the timecode flag:
- * this will be handled later in vb2_internal_qbuf().
- * The 'field' is valid metadata for this output buffer
- * and so that needs to be copied here.
- */
- vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
- vb->v4l2_buf.field = b->field;
- } else {
- /* Zero any output buffer flags as this is a capture buffer */
- vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
- }
-}
-
/**
* __qbuf_mmap() - handle qbuf of an MMAP buffer
*/
-static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
{
- __fill_vb2_buffer(vb, b, vb->v4l2_planes);
- return call_vb_qop(vb, buf_prepare, vb);
+ int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, pb, vb->planes);
+ return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}
/**
* __qbuf_userptr() - handle qbuf of a USERPTR buffer
*/
-static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
{
- struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
unsigned int plane;
int ret;
enum dma_data_direction dma_dir =
- V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- __fill_vb2_buffer(vb, b, planes);
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+ if (ret)
+ return ret;
for (plane = 0; plane < vb->num_planes; ++plane) {
/* Skip the plane if already verified */
- if (vb->v4l2_planes[plane].m.userptr &&
- vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
- && vb->v4l2_planes[plane].length == planes[plane].length)
+ if (vb->planes[plane].m.userptr &&
+ vb->planes[plane].m.userptr == planes[plane].m.userptr
+ && vb->planes[plane].length == planes[plane].length)
continue;
dprintk(3, "userspace address for plane %d changed, "
@@ -1457,7 +983,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
vb->planes[plane].mem_priv = NULL;
- memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+ vb->planes[plane].bytesused = 0;
+ vb->planes[plane].length = 0;
+ vb->planes[plane].m.userptr = 0;
+ vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
@@ -1476,8 +1005,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
* Now that everything is in order, copy relevant information
* provided by userspace.
*/
- for (plane = 0; plane < vb->num_planes; ++plane)
- vb->v4l2_planes[plane] = planes[plane];
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.userptr = planes[plane].m.userptr;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
if (reacquired) {
/*
@@ -1504,10 +1037,11 @@ err:
/* In case of errors, release planes that were already acquired */
for (plane = 0; plane < vb->num_planes; ++plane) {
if (vb->planes[plane].mem_priv)
- call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ call_void_memop(vb, put_userptr,
+ vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
- vb->v4l2_planes[plane].m.userptr = 0;
- vb->v4l2_planes[plane].length = 0;
+ vb->planes[plane].m.userptr = 0;
+ vb->planes[plane].length = 0;
}
return ret;
@@ -1516,20 +1050,22 @@ err:
/**
* __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
*/
-static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
{
- struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
unsigned int plane;
int ret;
enum dma_data_direction dma_dir =
- V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- __fill_vb2_buffer(vb, b, planes);
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+ if (ret)
+ return ret;
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
@@ -1554,7 +1090,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Skip the plane if already verified */
if (dbuf == vb->planes[plane].dbuf &&
- vb->v4l2_planes[plane].length == planes[plane].length) {
+ vb->planes[plane].length == planes[plane].length) {
dma_buf_put(dbuf);
continue;
}
@@ -1568,11 +1104,15 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Release previously acquired memory if present */
__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
- memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+ vb->planes[plane].bytesused = 0;
+ vb->planes[plane].length = 0;
+ vb->planes[plane].m.fd = 0;
+ vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
- dbuf, planes[plane].length, dma_dir);
+ mem_priv = call_ptr_memop(vb, attach_dmabuf,
+ q->alloc_ctx[plane], dbuf, planes[plane].length,
+ dma_dir);
if (IS_ERR(mem_priv)) {
dprintk(1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
@@ -1602,8 +1142,12 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
* Now that everything is in order, copy relevant information
* provided by userspace.
*/
- for (plane = 0; plane < vb->num_planes; ++plane)
- vb->v4l2_planes[plane] = planes[plane];
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.fd = planes[plane].m.fd;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
if (reacquired) {
/*
@@ -1652,49 +1196,27 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
call_void_vb_qop(vb, buf_queue, vb);
}
-static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
{
struct vb2_queue *q = vb->vb2_queue;
int ret;
- ret = __verify_length(vb, b);
- if (ret < 0) {
- dprintk(1, "plane parameters verification failed: %d\n", ret);
- return ret;
- }
- if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) {
- /*
- * If the format's field is ALTERNATE, then the buffer's field
- * should be either TOP or BOTTOM, not ALTERNATE since that
- * makes no sense. The driver has to know whether the
- * buffer represents a top or a bottom field in order to
- * program any DMA correctly. Using ALTERNATE is wrong, since
- * that just says that it is either a top or a bottom field,
- * but not which of the two it is.
- */
- dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
- return -EINVAL;
- }
-
if (q->error) {
dprintk(1, "fatal error occurred on queue\n");
return -EIO;
}
vb->state = VB2_BUF_STATE_PREPARING;
- vb->v4l2_buf.timestamp.tv_sec = 0;
- vb->v4l2_buf.timestamp.tv_usec = 0;
- vb->v4l2_buf.sequence = 0;
switch (q->memory) {
- case V4L2_MEMORY_MMAP:
- ret = __qbuf_mmap(vb, b);
+ case VB2_MEMORY_MMAP:
+ ret = __qbuf_mmap(vb, pb);
break;
- case V4L2_MEMORY_USERPTR:
- ret = __qbuf_userptr(vb, b);
+ case VB2_MEMORY_USERPTR:
+ ret = __qbuf_userptr(vb, pb);
break;
- case V4L2_MEMORY_DMABUF:
- ret = __qbuf_dmabuf(vb, b);
+ case VB2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, pb);
break;
default:
WARN(1, "Invalid queue type\n");
@@ -1708,79 +1230,48 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return ret;
}
-static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- const char *opname)
-{
- if (b->type != q->type) {
- dprintk(1, "%s: invalid buffer type\n", opname);
- return -EINVAL;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "%s: buffer index out of range\n", opname);
- return -EINVAL;
- }
-
- if (q->bufs[b->index] == NULL) {
- /* Should never happen */
- dprintk(1, "%s: buffer is NULL\n", opname);
- return -EINVAL;
- }
-
- if (b->memory != q->memory) {
- dprintk(1, "%s: invalid memory type\n", opname);
- return -EINVAL;
- }
-
- return __verify_planes_array(q->bufs[b->index], b);
-}
-
/**
- * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
+ * to the kernel
* @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_prepare_buf
+ * @index: id number of the buffer
+ * @pb: buffer structure passed from userspace to vidioc_prepare_buf
* handler in driver
*
* Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_prepare callback in the driver (if provided), in which
- * driver-specific buffer initialization can be performed,
+ * The passed buffer should have been verified.
+ * This function calls the buf_prepare callback in the driver (if provided),
+ * in which driver-specific buffer initialization can be performed.
*
* The return values from this function are intended to be directly returned
* from vidioc_prepare_buf handler in driver.
*/
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
struct vb2_buffer *vb;
int ret;
- if (vb2_fileio_is_active(q)) {
- dprintk(1, "file io in progress\n");
- return -EBUSY;
- }
-
- ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
- if (ret)
- return ret;
-
- vb = q->bufs[b->index];
+ vb = q->bufs[index];
if (vb->state != VB2_BUF_STATE_DEQUEUED) {
dprintk(1, "invalid buffer state %d\n",
vb->state);
return -EINVAL;
}
- ret = __buf_prepare(vb, b);
- if (!ret) {
- /* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
+ ret = __buf_prepare(vb, pb);
+ if (ret)
+ return ret;
+
+ /* Fill buffer information for the userspace */
+ ret = call_bufop(q, fill_user_buffer, vb, pb);
+ if (ret)
+ return ret;
+
+ dprintk(1, "prepare of buffer %d succeeded\n", vb->index);
- dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
- }
return ret;
}
-EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
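As a rough illustration (not part of this patch), a V4L2-level caller of the new helper might look like the sketch below; the example_* name and the inlined checks are assumptions standing in for whatever verification the wrapper layer performs before handing off to the core:

static int example_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
        /* verification that used to live in vb2_queue_or_prepare_buf() */
        if (b->type != q->type || b->memory != q->memory ||
            b->index >= q->num_buffers)
                return -EINVAL;

        /* b is passed through as the opaque pb and handled by q->buf_ops */
        return vb2_core_prepare_buf(q, b->index, b);
}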
/**
* vb2_start_streaming() - Attempt to start streaming.
@@ -1845,19 +1336,34 @@ static int vb2_start_streaming(struct vb2_queue *q)
return ret;
}
-static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+/**
+ * vb2_core_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @index: id number of the buffer
+ * @pb: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ * This function:
+ * 1) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * 2) if streaming is on, queues the buffer in driver by the means of buf_queue
+ * callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
{
- int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
struct vb2_buffer *vb;
+ int ret;
- if (ret)
- return ret;
-
- vb = q->bufs[b->index];
+ vb = q->bufs[index];
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
- ret = __buf_prepare(vb, b);
+ ret = __buf_prepare(vb, pb);
if (ret)
return ret;
break;
@@ -1879,18 +1385,8 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
q->queued_count++;
q->waiting_for_buffers = false;
vb->state = VB2_BUF_STATE_QUEUED;
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- /*
- * For output buffers copy the timestamp if needed,
- * and the timecode field and flag if needed.
- */
- if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
- V4L2_BUF_FLAG_TIMESTAMP_COPY)
- vb->v4l2_buf.timestamp = b->timestamp;
- vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
- if (b->flags & V4L2_BUF_FLAG_TIMECODE)
- vb->v4l2_buf.timecode = b->timecode;
- }
+
+ call_bufop(q, set_timestamp, vb, pb);
trace_vb2_qbuf(q, vb);
@@ -1902,7 +1398,9 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
__enqueue_in_driver(vb);
/* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
+ ret = call_bufop(q, fill_user_buffer, vb, pb);
+ if (ret)
+ return ret;
/*
* If streamon has been called, and we haven't yet called
@@ -1917,37 +1415,10 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
return ret;
}
- dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
+ dprintk(1, "qbuf of buffer %d succeeded\n", vb->index);
return 0;
}
-
-/**
- * vb2_qbuf() - Queue a buffer from userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
-{
- if (vb2_fileio_is_active(q)) {
- dprintk(1, "file io in progress\n");
- return -EBUSY;
- }
-
- return vb2_internal_qbuf(q, b);
-}
-EXPORT_SYMBOL_GPL(vb2_qbuf);
+EXPORT_SYMBOL_GPL(vb2_core_qbuf);
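A comparable sketch for queueing, again with hypothetical example_* naming and under the assumption that the caller has already validated the v4l2_buffer:

static int example_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
        /* the caller, not the core, now rejects mismatched buffers */
        if (b->type != q->type || b->index >= q->num_buffers)
                return -EINVAL;

        /* prepares the buffer if still dequeued, then hands it to buf_queue */
        return vb2_core_qbuf(q, b->index, b);
}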
/**
* __vb2_wait_for_done_vb() - wait for a buffer to become available
@@ -2031,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- struct v4l2_buffer *b, int nonblocking)
+ int nonblocking)
{
unsigned long flags;
int ret;
@@ -2052,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
+ * Verifying planes is NOT necessary since it already has been checked
+ * before the buffer is queued/prepared. So it can never fail.
*/
- ret = __verify_planes_array(*vb, b);
- if (!ret)
- list_del(&(*vb)->done_entry);
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -2098,7 +1569,7 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
vb->state = VB2_BUF_STATE_DEQUEUED;
/* unmap DMABUF buffer */
- if (q->memory == V4L2_MEMORY_DMABUF)
+ if (q->memory == VB2_MEMORY_DMABUF)
for (i = 0; i < vb->num_planes; ++i) {
if (!vb->planes[i].dbuf_mapped)
continue;
@@ -2107,16 +1578,33 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
}
}
-static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+/**
+ * vb2_core_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @pb: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ * This function:
+ * 1) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 2) fills the buffer struct members with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
{
struct vb2_buffer *vb = NULL;
int ret;
- if (b->type != q->type) {
- dprintk(1, "invalid buffer type\n");
- return -EINVAL;
- }
- ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, nonblocking);
if (ret < 0)
return ret;
@@ -2135,55 +1623,26 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
call_void_vb_qop(vb, buf_finish, vb);
/* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
+ ret = call_bufop(q, fill_user_buffer, vb, pb);
+ if (ret)
+ return ret;
+
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
q->queued_count--;
trace_vb2_dqbuf(q, vb);
- if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
- vb->v4l2_buf.flags & V4L2_BUF_FLAG_LAST)
- q->last_buffer_dequeued = true;
/* go back to dequeued state */
__vb2_dqbuf(vb);
dprintk(1, "dqbuf of buffer %d, with state %d\n",
- vb->v4l2_buf.index, vb->state);
+ vb->index, vb->state);
return 0;
-}
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
-{
- if (vb2_fileio_is_active(q)) {
- dprintk(1, "file io in progress\n");
- return -EBUSY;
- }
- return vb2_internal_dqbuf(q, b, nonblocking);
}
-EXPORT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
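For dequeueing, a minimal hypothetical wrapper would mainly translate the file's blocking mode into the nonblocking argument:

static int example_dqbuf(struct file *file, struct vb2_queue *q,
                         struct v4l2_buffer *b)
{
        /* drivers normally pass the O_NONBLOCK state of the open file */
        return vb2_core_dqbuf(q, b, file->f_flags & O_NONBLOCK);
}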
/**
* __vb2_queue_cancel() - cancel and stop (pause) streaming
@@ -2253,7 +1712,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
}
}
-static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
int ret;
@@ -2295,6 +1754,7 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
dprintk(3, "successful\n");
return 0;
}
+EXPORT_SYMBOL_GPL(vb2_core_streamon);
/**
* vb2_queue_error() - signal a fatal error on the queue
@@ -2317,30 +1777,7 @@ void vb2_queue_error(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
-/**
- * vb2_streamon - start streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamon handler
- *
- * Should be called from vidioc_streamon handler of a driver.
- * This function:
- * 1) verifies current state
- * 2) passes any previously queued buffers to the driver and starts streaming
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
- */
-int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
-{
- if (vb2_fileio_is_active(q)) {
- dprintk(1, "file io in progress\n");
- return -EBUSY;
- }
- return vb2_internal_streamon(q, type);
-}
-EXPORT_SYMBOL_GPL(vb2_streamon);
-
-static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
if (type != q->type) {
dprintk(1, "invalid stream type\n");
@@ -2357,37 +1794,13 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
* their normal dequeued state.
*/
__vb2_queue_cancel(q);
- q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ q->waiting_for_buffers = !q->is_output;
q->last_buffer_dequeued = false;
dprintk(3, "successful\n");
return 0;
}
-
-/**
- * vb2_streamoff - stop streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamoff handler
- *
- * Should be called from vidioc_streamoff handler of a driver.
- * This function:
- * 1) verifies current state,
- * 2) stop streaming and dequeues any queued buffers, including those previously
- * passed to the driver (after waiting for the driver to finish).
- *
- * This call can be used for pausing playback.
- * The return values from this function are intended to be directly returned
- * from vidioc_streamoff handler in the driver
- */
-int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
-{
- if (vb2_fileio_is_active(q)) {
- dprintk(1, "file io in progress\n");
- return -EBUSY;
- }
- return vb2_internal_streamoff(q, type);
-}
-EXPORT_SYMBOL_GPL(vb2_streamoff);
+EXPORT_SYMBOL_GPL(vb2_core_streamoff);
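The streaming entry points keep the type check inside the core, so an illustrative caller (names assumed) reduces to a straight pass-through:

static int example_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
        /* the core verifies type == q->type and enqueues pending buffers */
        return vb2_core_streamon(q, type);
}

static int example_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
        /* cancels the queue and returns all buffers to the dequeued state */
        return vb2_core_streamoff(q, type);
}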
/**
* __find_plane_by_offset() - find plane associated with the given offset off
@@ -2407,7 +1820,7 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
vb = q->bufs[buffer];
for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->v4l2_planes[plane].m.mem_offset == off) {
+ if (vb->planes[plane].m.offset == off) {
*_buffer = buffer;
*_plane = plane;
return 0;
@@ -2419,22 +1832,27 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
}
/**
- * vb2_expbuf() - Export a buffer as a file descriptor
+ * vb2_core_expbuf() - Export a buffer as a file descriptor
* @q: videobuf2 queue
- * @eb: export buffer structure passed from userspace to vidioc_expbuf
- * handler in driver
+ * @fd: pointer to the file descriptor associated with DMABUF (set by driver)
+ * @type: buffer type
+ * @index: id number of the buffer
+ * @plane: index of the plane to be exported, 0 for single plane queues
+ * @flags: flags for the newly created file; currently only O_CLOEXEC and
+ * the access mode flags (O_RDONLY, O_WRONLY, O_RDWR) are supported,
+ * refer to the manual of the open syscall for more details
*
* The return values from this function are intended to be directly returned
* from vidioc_expbuf handler in driver.
*/
-int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
+ unsigned int index, unsigned int plane, unsigned int flags)
{
struct vb2_buffer *vb = NULL;
struct vb2_plane *vb_plane;
int ret;
struct dma_buf *dbuf;
- if (q->memory != V4L2_MEMORY_MMAP) {
+ if (q->memory != VB2_MEMORY_MMAP) {
dprintk(1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
@@ -2444,24 +1862,24 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
return -EINVAL;
}
- if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
+ if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");
return -EINVAL;
}
- if (eb->type != q->type) {
+ if (type != q->type) {
dprintk(1, "invalid buffer type\n");
return -EINVAL;
}
- if (eb->index >= q->num_buffers) {
+ if (index >= q->num_buffers) {
dprintk(1, "buffer index out of range\n");
return -EINVAL;
}
- vb = q->bufs[eb->index];
+ vb = q->bufs[index];
- if (eb->plane >= vb->num_planes) {
+ if (plane >= vb->num_planes) {
dprintk(1, "buffer plane out of range\n");
return -EINVAL;
}
@@ -2471,30 +1889,31 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
return -EBUSY;
}
- vb_plane = &vb->planes[eb->plane];
+ vb_plane = &vb->planes[plane];
- dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
+ dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
+ flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(1, "failed to export buffer %d, plane %d\n",
- eb->index, eb->plane);
+ index, plane);
return -EINVAL;
}
- ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
+ ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
if (ret < 0) {
dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
- eb->index, eb->plane, ret);
+ index, plane, ret);
dma_buf_put(dbuf);
return ret;
}
dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
- eb->index, eb->plane, ret);
- eb->fd = ret;
+ index, plane, ret);
+ *fd = ret;
return 0;
}
-EXPORT_SYMBOL_GPL(vb2_expbuf);
+EXPORT_SYMBOL_GPL(vb2_core_expbuf);
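Since the export helper now takes scalars instead of a struct v4l2_exportbuffer, a hypothetical V4L2 wrapper would unpack and repack the fields roughly like this:

static int example_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
        int fd, ret;

        ret = vb2_core_expbuf(q, &fd, eb->type, eb->index, eb->plane,
                              eb->flags);
        if (ret)
                return ret;

        /* hand the freshly created DMABUF fd back to userspace */
        eb->fd = fd;
        return 0;
}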
/**
* vb2_mmap() - map video buffers into application address space
@@ -2523,7 +1942,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
int ret;
unsigned long length;
- if (q->memory != V4L2_MEMORY_MMAP) {
+ if (q->memory != VB2_MEMORY_MMAP) {
dprintk(1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
@@ -2535,7 +1954,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
dprintk(1, "invalid vma flags, VM_SHARED needed\n");
return -EINVAL;
}
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (q->is_output) {
if (!(vma->vm_flags & VM_WRITE)) {
dprintk(1, "invalid vma flags, VM_WRITE needed\n");
return -EINVAL;
@@ -2565,7 +1984,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
* The buffer length was page_aligned at __vb2_buf_mem_alloc(),
* so, we need to do the same here.
*/
- length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
+ length = PAGE_ALIGN(vb->planes[plane].length);
if (length < (vma->vm_end - vma->vm_start)) {
dprintk(1,
"MMAP invalid, as it would overflow buffer length\n");
@@ -2596,7 +2015,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
void *vaddr;
int ret;
- if (q->memory != V4L2_MEMORY_MMAP) {
+ if (q->memory != VB2_MEMORY_MMAP) {
dprintk(1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
@@ -2616,123 +2035,8 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif
-static int __vb2_init_fileio(struct vb2_queue *q, int read);
-static int __vb2_cleanup_fileio(struct vb2_queue *q);
-
-/**
- * vb2_poll() - implements poll userspace operation
- * @q: videobuf2 queue
- * @file: file argument passed to the poll file operation handler
- * @wait: wait argument passed to the poll file operation handler
- *
- * This function implements poll file operation handler for a driver.
- * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
- * be informed that the file descriptor of a video device is available for
- * reading.
- * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
- * will be reported as available for writing.
- *
- * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
- * pending events.
- *
- * The return values from this function are intended to be directly returned
- * from poll handler in driver.
- */
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
-{
- struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
- struct vb2_buffer *vb = NULL;
- unsigned int res = 0;
- unsigned long flags;
-
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- if (v4l2_event_pending(fh))
- res = POLLPRI;
- else if (req_events & POLLPRI)
- poll_wait(file, &fh->wait, wait);
- }
-
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
- return res;
- if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
- return res;
-
- /*
- * Start file I/O emulator only if streaming API has not been used yet.
- */
- if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
- (req_events & (POLLIN | POLLRDNORM))) {
- if (__vb2_init_fileio(q, 1))
- return res | POLLERR;
- }
- if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
- (req_events & (POLLOUT | POLLWRNORM))) {
- if (__vb2_init_fileio(q, 0))
- return res | POLLERR;
- /*
- * Write to OUTPUT queue can be done immediately.
- */
- return res | POLLOUT | POLLWRNORM;
- }
- }
-
- /*
- * There is nothing to wait for if the queue isn't streaming, or if the
- * error flag is set.
- */
- if (!vb2_is_streaming(q) || q->error)
- return res | POLLERR;
- /*
- * For compatibility with vb1: if QBUF hasn't been called yet, then
- * return POLLERR as well. This only affects capture queues, output
- * queues will always initialize waiting_for_buffers to false.
- */
- if (q->waiting_for_buffers)
- return res | POLLERR;
-
- /*
- * For output streams you can write as long as there are fewer buffers
- * queued than there are buffers available.
- */
- if (V4L2_TYPE_IS_OUTPUT(q->type) && q->queued_count < q->num_buffers)
- return res | POLLOUT | POLLWRNORM;
-
- if (list_empty(&q->done_list)) {
- /*
- * If the last buffer was dequeued from a capture queue,
- * return immediately. DQBUF will return -EPIPE.
- */
- if (q->last_buffer_dequeued)
- return res | POLLIN | POLLRDNORM;
-
- poll_wait(file, &q->done_wq, wait);
- }
-
- /*
- * Take first buffer available for dequeuing.
- */
- spin_lock_irqsave(&q->done_lock, flags);
- if (!list_empty(&q->done_list))
- vb = list_first_entry(&q->done_list, struct vb2_buffer,
- done_entry);
- spin_unlock_irqrestore(&q->done_lock, flags);
-
- if (vb && (vb->state == VB2_BUF_STATE_DONE
- || vb->state == VB2_BUF_STATE_ERROR)) {
- return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
- res | POLLOUT | POLLWRNORM :
- res | POLLIN | POLLRDNORM;
- }
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_poll);
-
/**
- * vb2_queue_init() - initialize a videobuf2 queue
+ * vb2_core_queue_init() - initialize a videobuf2 queue
* @q: videobuf2 queue; this structure should be allocated in driver
*
* The vb2_queue structure should be allocated by the driver. The driver is
@@ -2742,7 +2046,7 @@ EXPORT_SYMBOL_GPL(vb2_poll);
* to the struct vb2_queue description in include/media/videobuf2-core.h
* for more information.
*/
-int vb2_queue_init(struct vb2_queue *q)
+int vb2_core_queue_init(struct vb2_queue *q)
{
/*
* Sanity check
@@ -2753,16 +2057,9 @@ int vb2_queue_init(struct vb2_queue *q)
WARN_ON(!q->type) ||
WARN_ON(!q->io_modes) ||
WARN_ON(!q->ops->queue_setup) ||
- WARN_ON(!q->ops->buf_queue) ||
- WARN_ON(q->timestamp_flags &
- ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
- V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
+ WARN_ON(!q->ops->buf_queue))
return -EINVAL;
- /* Warn that the driver should choose an appropriate timestamp type */
- WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
- V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
-
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
@@ -2774,819 +2071,24 @@ int vb2_queue_init(struct vb2_queue *q)
return 0;
}
-EXPORT_SYMBOL_GPL(vb2_queue_init);
+EXPORT_SYMBOL_GPL(vb2_core_queue_init);
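A minimal sketch of what a driver has to fill in before calling the core init, assuming hypothetical my_qops/my_mem_ops tables; the fields shown are the ones the sanity checks above insist on:

static int example_init_queue(struct vb2_queue *q)
{
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_USERPTR;
        q->ops = &my_qops;              /* must provide queue_setup and buf_queue */
        q->mem_ops = &my_mem_ops;       /* e.g. the dma-contig or dma-sg ops */

        return vb2_core_queue_init(q);
}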
/**
- * vb2_queue_release() - stop streaming, release the queue and free memory
+ * vb2_core_queue_release() - stop streaming, release the queue and free memory
* @q: videobuf2 queue
*
* This function stops streaming and performs necessary clean ups, including
* freeing video buffer memory. The driver is responsible for freeing
* the vb2_queue structure itself.
*/
-void vb2_queue_release(struct vb2_queue *q)
+void vb2_core_queue_release(struct vb2_queue *q)
{
- __vb2_cleanup_fileio(q);
__vb2_queue_cancel(q);
mutex_lock(&q->mmap_lock);
__vb2_queue_free(q, q->num_buffers);
mutex_unlock(&q->mmap_lock);
}
-EXPORT_SYMBOL_GPL(vb2_queue_release);
-
-/**
- * struct vb2_fileio_buf - buffer context used by file io emulator
- *
- * vb2 provides a compatibility layer and emulator of file io (read and
- * write) calls on top of streaming API. This structure is used for
- * tracking context related to the buffers.
- */
-struct vb2_fileio_buf {
- void *vaddr;
- unsigned int size;
- unsigned int pos;
- unsigned int queued:1;
-};
-
-/**
- * struct vb2_fileio_data - queue context used by file io emulator
- *
- * @cur_index: the index of the buffer currently being read from or
- * written to. If equal to q->num_buffers then a new buffer
- * must be dequeued.
- * @initial_index: in the read() case all buffers are queued up immediately
- * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
- * buffers. However, in the write() case no buffers are initially
- * queued, instead whenever a buffer is full it is queued up by
- * __vb2_perform_fileio(). Only once all available buffers have
- * been queued up will __vb2_perform_fileio() start to dequeue
- * buffers. This means that initially __vb2_perform_fileio()
- * needs to know what buffer index to use when it is queuing up
- * the buffers for the first time. That initial index is stored
- * in this field. Once it is equal to q->num_buffers all
- * available buffers have been queued and __vb2_perform_fileio()
- * should start the normal dequeue/queue cycle.
- *
- * vb2 provides a compatibility layer and emulator of file io (read and
- * write) calls on top of streaming API. For proper operation it required
- * this structure to save the driver state between each call of the read
- * or write function.
- */
-struct vb2_fileio_data {
- struct v4l2_requestbuffers req;
- struct v4l2_plane p;
- struct v4l2_buffer b;
- struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
- unsigned int cur_index;
- unsigned int initial_index;
- unsigned int q_count;
- unsigned int dq_count;
- unsigned read_once:1;
- unsigned write_immediately:1;
-};
-
-/**
- * __vb2_init_fileio() - initialize file io emulator
- * @q: videobuf2 queue
- * @read: mode selector (1 means read, 0 means write)
- */
-static int __vb2_init_fileio(struct vb2_queue *q, int read)
-{
- struct vb2_fileio_data *fileio;
- int i, ret;
- unsigned int count = 0;
-
- /*
- * Sanity check
- */
- if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
- (!read && !(q->io_modes & VB2_WRITE))))
- return -EINVAL;
-
- /*
- * Check if device supports mapping buffers to kernel virtual space.
- */
- if (!q->mem_ops->vaddr)
- return -EBUSY;
-
- /*
- * Check if streaming api has not been already activated.
- */
- if (q->streaming || q->num_buffers > 0)
- return -EBUSY;
-
- /*
- * Start with count 1, driver can increase it in queue_setup()
- */
- count = 1;
-
- dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
- (read) ? "read" : "write", count, q->fileio_read_once,
- q->fileio_write_immediately);
-
- fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
- if (fileio == NULL)
- return -ENOMEM;
-
- fileio->read_once = q->fileio_read_once;
- fileio->write_immediately = q->fileio_write_immediately;
-
- /*
- * Request buffers and use MMAP type to force driver
- * to allocate buffers by itself.
- */
- fileio->req.count = count;
- fileio->req.memory = V4L2_MEMORY_MMAP;
- fileio->req.type = q->type;
- q->fileio = fileio;
- ret = __reqbufs(q, &fileio->req);
- if (ret)
- goto err_kfree;
-
- /*
- * Check if plane_count is correct
- * (multiplane buffers are not supported).
- */
- if (q->bufs[0]->num_planes != 1) {
- ret = -EBUSY;
- goto err_reqbufs;
- }
-
- /*
- * Get kernel address of each buffer.
- */
- for (i = 0; i < q->num_buffers; i++) {
- fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
- if (fileio->bufs[i].vaddr == NULL) {
- ret = -EINVAL;
- goto err_reqbufs;
- }
- fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
- }
-
- /*
- * Read mode requires pre queuing of all buffers.
- */
- if (read) {
- bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
-
- /*
- * Queue all buffers.
- */
- for (i = 0; i < q->num_buffers; i++) {
- struct v4l2_buffer *b = &fileio->b;
-
- memset(b, 0, sizeof(*b));
- b->type = q->type;
- if (is_multiplanar) {
- memset(&fileio->p, 0, sizeof(fileio->p));
- b->m.planes = &fileio->p;
- b->length = 1;
- }
- b->memory = q->memory;
- b->index = i;
- ret = vb2_internal_qbuf(q, b);
- if (ret)
- goto err_reqbufs;
- fileio->bufs[i].queued = 1;
- }
- /*
- * All buffers have been queued, so mark that by setting
- * initial_index to q->num_buffers
- */
- fileio->initial_index = q->num_buffers;
- fileio->cur_index = q->num_buffers;
- }
-
- /*
- * Start streaming.
- */
- ret = vb2_internal_streamon(q, q->type);
- if (ret)
- goto err_reqbufs;
-
- return ret;
-
-err_reqbufs:
- fileio->req.count = 0;
- __reqbufs(q, &fileio->req);
-
-err_kfree:
- q->fileio = NULL;
- kfree(fileio);
- return ret;
-}
-
-/**
- * __vb2_cleanup_fileio() - free resourced used by file io emulator
- * @q: videobuf2 queue
- */
-static int __vb2_cleanup_fileio(struct vb2_queue *q)
-{
- struct vb2_fileio_data *fileio = q->fileio;
-
- if (fileio) {
- vb2_internal_streamoff(q, q->type);
- q->fileio = NULL;
- fileio->req.count = 0;
- vb2_reqbufs(q, &fileio->req);
- kfree(fileio);
- dprintk(3, "file io emulator closed\n");
- }
- return 0;
-}
-
-/**
- * __vb2_perform_fileio() - perform a single file io (read or write) operation
- * @q: videobuf2 queue
- * @data: pointed to target userspace buffer
- * @count: number of bytes to read or write
- * @ppos: file handle position tracking pointer
- * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking)
- * @read: access mode selector (1 means read, 0 means write)
- */
-static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
- loff_t *ppos, int nonblock, int read)
-{
- struct vb2_fileio_data *fileio;
- struct vb2_fileio_buf *buf;
- bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
- /*
- * When using write() to write data to an output video node the vb2 core
- * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
- * else is able to provide this information with the write() operation.
- */
- bool set_timestamp = !read &&
- (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
- V4L2_BUF_FLAG_TIMESTAMP_COPY;
- int ret, index;
-
- dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
- read ? "read" : "write", (long)*ppos, count,
- nonblock ? "non" : "");
-
- if (!data)
- return -EINVAL;
-
- /*
- * Initialize emulator on first call.
- */
- if (!vb2_fileio_is_active(q)) {
- ret = __vb2_init_fileio(q, read);
- dprintk(3, "vb2_init_fileio result: %d\n", ret);
- if (ret)
- return ret;
- }
- fileio = q->fileio;
-
- /*
- * Check if we need to dequeue the buffer.
- */
- index = fileio->cur_index;
- if (index >= q->num_buffers) {
- /*
- * Call vb2_dqbuf to get buffer back.
- */
- memset(&fileio->b, 0, sizeof(fileio->b));
- fileio->b.type = q->type;
- fileio->b.memory = q->memory;
- if (is_multiplanar) {
- memset(&fileio->p, 0, sizeof(fileio->p));
- fileio->b.m.planes = &fileio->p;
- fileio->b.length = 1;
- }
- ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
- dprintk(5, "vb2_dqbuf result: %d\n", ret);
- if (ret)
- return ret;
- fileio->dq_count += 1;
-
- fileio->cur_index = index = fileio->b.index;
- buf = &fileio->bufs[index];
-
- /*
- * Get number of bytes filled by the driver
- */
- buf->pos = 0;
- buf->queued = 0;
- buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
- : vb2_plane_size(q->bufs[index], 0);
- /* Compensate for data_offset on read in the multiplanar case. */
- if (is_multiplanar && read &&
- fileio->b.m.planes[0].data_offset < buf->size) {
- buf->pos = fileio->b.m.planes[0].data_offset;
- buf->size -= buf->pos;
- }
- } else {
- buf = &fileio->bufs[index];
- }
-
- /*
- * Limit count on last few bytes of the buffer.
- */
- if (buf->pos + count > buf->size) {
- count = buf->size - buf->pos;
- dprintk(5, "reducing read count: %zd\n", count);
- }
-
- /*
- * Transfer data to userspace.
- */
- dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
- count, index, buf->pos);
- if (read)
- ret = copy_to_user(data, buf->vaddr + buf->pos, count);
- else
- ret = copy_from_user(buf->vaddr + buf->pos, data, count);
- if (ret) {
- dprintk(3, "error copying data\n");
- return -EFAULT;
- }
-
- /*
- * Update counters.
- */
- buf->pos += count;
- *ppos += count;
-
- /*
- * Queue next buffer if required.
- */
- if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
- /*
- * Check if this is the last buffer to read.
- */
- if (read && fileio->read_once && fileio->dq_count == 1) {
- dprintk(3, "read limit reached\n");
- return __vb2_cleanup_fileio(q);
- }
-
- /*
- * Call vb2_qbuf and give buffer to the driver.
- */
- memset(&fileio->b, 0, sizeof(fileio->b));
- fileio->b.type = q->type;
- fileio->b.memory = q->memory;
- fileio->b.index = index;
- fileio->b.bytesused = buf->pos;
- if (is_multiplanar) {
- memset(&fileio->p, 0, sizeof(fileio->p));
- fileio->p.bytesused = buf->pos;
- fileio->b.m.planes = &fileio->p;
- fileio->b.length = 1;
- }
- if (set_timestamp)
- v4l2_get_timestamp(&fileio->b.timestamp);
- ret = vb2_internal_qbuf(q, &fileio->b);
- dprintk(5, "vb2_dbuf result: %d\n", ret);
- if (ret)
- return ret;
-
- /*
- * Buffer has been queued, update the status
- */
- buf->pos = 0;
- buf->queued = 1;
- buf->size = vb2_plane_size(q->bufs[index], 0);
- fileio->q_count += 1;
- /*
- * If we are queuing up buffers for the first time, then
- * increase initial_index by one.
- */
- if (fileio->initial_index < q->num_buffers)
- fileio->initial_index++;
- /*
- * The next buffer to use is either a buffer that's going to be
- * queued for the first time (initial_index < q->num_buffers)
- * or it is equal to q->num_buffers, meaning that the next
- * time we need to dequeue a buffer since we've now queued up
- * all the 'first time' buffers.
- */
- fileio->cur_index = fileio->initial_index;
- }
-
- /*
- * Return proper number of bytes processed.
- */
- if (ret == 0)
- ret = count;
- return ret;
-}
-
-size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
- loff_t *ppos, int nonblocking)
-{
- return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
-}
-EXPORT_SYMBOL_GPL(vb2_read);
-
-size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
- loff_t *ppos, int nonblocking)
-{
- return __vb2_perform_fileio(q, (char __user *) data, count,
- ppos, nonblocking, 0);
-}
-EXPORT_SYMBOL_GPL(vb2_write);
-
-struct vb2_threadio_data {
- struct task_struct *thread;
- vb2_thread_fnc fnc;
- void *priv;
- bool stop;
-};
-
-static int vb2_thread(void *data)
-{
- struct vb2_queue *q = data;
- struct vb2_threadio_data *threadio = q->threadio;
- struct vb2_fileio_data *fileio = q->fileio;
- bool set_timestamp = false;
- int prequeue = 0;
- int index = 0;
- int ret = 0;
-
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- prequeue = q->num_buffers;
- set_timestamp =
- (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
- V4L2_BUF_FLAG_TIMESTAMP_COPY;
- }
-
- set_freezable();
-
- for (;;) {
- struct vb2_buffer *vb;
-
- /*
- * Call vb2_dqbuf to get buffer back.
- */
- memset(&fileio->b, 0, sizeof(fileio->b));
- fileio->b.type = q->type;
- fileio->b.memory = q->memory;
- if (prequeue) {
- fileio->b.index = index++;
- prequeue--;
- } else {
- call_void_qop(q, wait_finish, q);
- if (!threadio->stop)
- ret = vb2_internal_dqbuf(q, &fileio->b, 0);
- call_void_qop(q, wait_prepare, q);
- dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
- }
- if (ret || threadio->stop)
- break;
- try_to_freeze();
-
- vb = q->bufs[fileio->b.index];
- if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
- if (threadio->fnc(vb, threadio->priv))
- break;
- call_void_qop(q, wait_finish, q);
- if (set_timestamp)
- v4l2_get_timestamp(&fileio->b.timestamp);
- if (!threadio->stop)
- ret = vb2_internal_qbuf(q, &fileio->b);
- call_void_qop(q, wait_prepare, q);
- if (ret || threadio->stop)
- break;
- }
-
- /* Hmm, linux becomes *very* unhappy without this ... */
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
- return 0;
-}
-
-/*
- * This function should not be used for anything else but the videobuf2-dvb
- * support. If you think you have another good use-case for this, then please
- * contact the linux-media mailinglist first.
- */
-int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
- const char *thread_name)
-{
- struct vb2_threadio_data *threadio;
- int ret = 0;
-
- if (q->threadio)
- return -EBUSY;
- if (vb2_is_busy(q))
- return -EBUSY;
- if (WARN_ON(q->fileio))
- return -EBUSY;
-
- threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
- if (threadio == NULL)
- return -ENOMEM;
- threadio->fnc = fnc;
- threadio->priv = priv;
-
- ret = __vb2_init_fileio(q, !V4L2_TYPE_IS_OUTPUT(q->type));
- dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
- if (ret)
- goto nomem;
- q->threadio = threadio;
- threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
- if (IS_ERR(threadio->thread)) {
- ret = PTR_ERR(threadio->thread);
- threadio->thread = NULL;
- goto nothread;
- }
- return 0;
-
-nothread:
- __vb2_cleanup_fileio(q);
-nomem:
- kfree(threadio);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vb2_thread_start);
-
-int vb2_thread_stop(struct vb2_queue *q)
-{
- struct vb2_threadio_data *threadio = q->threadio;
- int err;
-
- if (threadio == NULL)
- return 0;
- threadio->stop = true;
- /* Wake up all pending sleeps in the thread */
- vb2_queue_error(q);
- err = kthread_stop(threadio->thread);
- __vb2_cleanup_fileio(q);
- threadio->thread = NULL;
- kfree(threadio);
- q->threadio = NULL;
- return err;
-}
-EXPORT_SYMBOL_GPL(vb2_thread_stop);
-
-/*
- * The following functions are not part of the vb2 core API, but are helper
- * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
- * and struct vb2_ops.
- * They contain boilerplate code that most if not all drivers have to do
- * and so they simplify the driver code.
- */
-
-/* The queue is busy if there is a owner and you are not that owner. */
-static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
-{
- return vdev->queue->owner && vdev->queue->owner != file->private_data;
-}
-
-/* vb2 ioctl helpers */
-
-int vb2_ioctl_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct video_device *vdev = video_devdata(file);
- int res = __verify_memory_type(vdev->queue, p->memory, p->type);
-
- if (res)
- return res;
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- res = __reqbufs(vdev->queue, p);
- /* If count == 0, then the owner has released all buffers and he
- is no longer owner of the queue. Otherwise we have a new owner. */
- if (res == 0)
- vdev->queue->owner = p->count ? file->private_data : NULL;
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
-
-int vb2_ioctl_create_bufs(struct file *file, void *priv,
- struct v4l2_create_buffers *p)
-{
- struct video_device *vdev = video_devdata(file);
- int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
-
- p->index = vdev->queue->num_buffers;
- /* If count == 0, then just check if memory and type are valid.
- Any -EBUSY result from __verify_memory_type can be mapped to 0. */
- if (p->count == 0)
- return res != -EBUSY ? res : 0;
- if (res)
- return res;
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- res = __create_bufs(vdev->queue, p);
- if (res == 0)
- vdev->queue->owner = file->private_data;
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
-
-int vb2_ioctl_prepare_buf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_prepare_buf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
-
-int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
- return vb2_querybuf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
-
-int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_qbuf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
-
-int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
-
-int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_streamon(vdev->queue, i);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
-
-int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_streamoff(vdev->queue, i);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
-
-int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_expbuf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
-
-/* v4l2_file_operations helpers */
-
-int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct video_device *vdev = video_devdata(file);
-
- return vb2_mmap(vdev->queue, vma);
-}
-EXPORT_SYMBOL_GPL(vb2_fop_mmap);
-
-int _vb2_fop_release(struct file *file, struct mutex *lock)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (lock)
- mutex_lock(lock);
- if (file->private_data == vdev->queue->owner) {
- vb2_queue_release(vdev->queue);
- vdev->queue->owner = NULL;
- }
- if (lock)
- mutex_unlock(lock);
- return v4l2_fh_release(file);
-}
-EXPORT_SYMBOL_GPL(_vb2_fop_release);
-
-int vb2_fop_release(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
-
- return _vb2_fop_release(file, lock);
-}
-EXPORT_SYMBOL_GPL(vb2_fop_release);
-
-ssize_t vb2_fop_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
- int err = -EBUSY;
-
- if (!(vdev->queue->io_modes & VB2_WRITE))
- return -EINVAL;
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- if (vb2_queue_is_busy(vdev, file))
- goto exit;
- err = vb2_write(vdev->queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
- if (vdev->queue->fileio)
- vdev->queue->owner = file->private_data;
-exit:
- if (lock)
- mutex_unlock(lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(vb2_fop_write);
-
-ssize_t vb2_fop_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
- int err = -EBUSY;
-
- if (!(vdev->queue->io_modes & VB2_READ))
- return -EINVAL;
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- if (vb2_queue_is_busy(vdev, file))
- goto exit;
- err = vb2_read(vdev->queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
- if (vdev->queue->fileio)
- vdev->queue->owner = file->private_data;
-exit:
- if (lock)
- mutex_unlock(lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(vb2_fop_read);
-
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
-{
- struct video_device *vdev = video_devdata(file);
- struct vb2_queue *q = vdev->queue;
- struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned res;
- void *fileio;
-
- /*
- * If this helper doesn't know how to lock, then you shouldn't be using
- * it but you should write your own.
- */
- WARN_ON(!lock);
-
- if (lock && mutex_lock_interruptible(lock))
- return POLLERR;
-
- fileio = q->fileio;
-
- res = vb2_poll(vdev->queue, file, wait);
-
- /* If fileio was started, then we have a new queue owner. */
- if (!fileio && q->fileio)
- q->owner = file->private_data;
- if (lock)
- mutex_unlock(lock);
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_fop_poll);
-
-#ifndef CONFIG_MMU
-unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct video_device *vdev = video_devdata(file);
-
- return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
-#endif
-
-/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
-
-void vb2_ops_wait_prepare(struct vb2_queue *vq)
-{
- mutex_unlock(vq->lock);
-}
-EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
-
-void vb2_ops_wait_finish(struct vb2_queue *vq)
-{
- mutex_lock(vq->lock);
-}
-EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
+EXPORT_SYMBOL_GPL(vb2_core_queue_release);
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 2397ceb1dc6b..c33127284cfe 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
@@ -100,7 +100,8 @@ static void vb2_dc_prepare(void *buf_priv)
if (!sgt || buf->db_attach)
return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
@@ -112,7 +113,7 @@ static void vb2_dc_finish(void *buf_priv)
if (!sgt || buf->db_attach)
return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
/*********************************************/
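The switch from sgt->nents to sgt->orig_nents matches the DMA API convention that the sync calls take the entry count originally handed to dma_map_sg(), not the (possibly coalesced) count it returned. A standalone sketch of that convention, with an assumed caller name:

static void example_sync_for_cpu(struct device *dev, struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
        /* use the original entry count, not the mapped count in sgt->nents */
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}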
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index be7bd6535c9d..9985c89f0513 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
@@ -210,7 +210,8 @@ static void vb2_dma_sg_prepare(void *buf_priv)
if (buf->db_attach)
return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
}
static void vb2_dma_sg_finish(void *buf_priv)
@@ -222,7 +223,7 @@ static void vb2_dma_sg_finish(void *buf_priv)
if (buf->db_attach)
return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
diff --git a/drivers/media/v4l2-core/videobuf2-internal.h b/drivers/media/v4l2-core/videobuf2-internal.h
new file mode 100644
index 000000000000..79018c749282
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-internal.h
@@ -0,0 +1,161 @@
+#ifndef _MEDIA_VIDEOBUF2_INTERNAL_H
+#define _MEDIA_VIDEOBUF2_INTERNAL_H
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <media/videobuf2-core.h>
+
+extern int vb2_debug;
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (vb2_debug >= level) \
+ pr_info("vb2: %s: " fmt, __func__, ## arg); \
+ } while (0)
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called
+ * successfully, which can either be per-buffer or per-queue.
+ *
+ * This makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define log_memop(vb, op) \
+ dprintk(2, "call_memop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->index, #op, \
+ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
+#define call_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ int err; \
+ \
+ log_memop(vb, op); \
+ err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_mem_ ## op++; \
+ err; \
+})
+
+#define call_ptr_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ void *ptr; \
+ \
+ log_memop(vb, op); \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ if (!IS_ERR_OR_NULL(ptr)) \
+ (vb)->cnt_mem_ ## op++; \
+ ptr; \
+})
+
+#define call_void_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ \
+ log_memop(vb, op); \
+ if (_q->mem_ops->op) \
+ _q->mem_ops->op(args); \
+ (vb)->cnt_mem_ ## op++; \
+})
+
+#define log_qop(q, op) \
+ dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
+ (q)->ops->op ? "" : " (nop)")
+
+#define call_qop(q, op, args...) \
+({ \
+ int err; \
+ \
+ log_qop(q, op); \
+ err = (q)->ops->op ? (q)->ops->op(args) : 0; \
+ if (!err) \
+ (q)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_qop(q, op, args...) \
+({ \
+ log_qop(q, op); \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ (q)->cnt_ ## op++; \
+})
+
+#define log_vb_qop(vb, op, args...) \
+ dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->index, #op, \
+ (vb)->vb2_queue->ops->op ? "" : " (nop)")
+
+#define call_vb_qop(vb, op, args...) \
+({ \
+ int err; \
+ \
+ log_vb_qop(vb, op); \
+ err = (vb)->vb2_queue->ops->op ? \
+ (vb)->vb2_queue->ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_vb_qop(vb, op, args...) \
+({ \
+ log_vb_qop(vb, op); \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ (vb)->cnt_ ## op++; \
+})
+
+#else
+
+#define call_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : NULL)
+
+#define call_void_memop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->mem_ops->op) \
+ (vb)->vb2_queue->mem_ops->op(args); \
+ } while (0)
+
+#define call_qop(q, op, args...) \
+ ((q)->ops->op ? (q)->ops->op(args) : 0)
+
+#define call_void_qop(q, op, args...) \
+ do { \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ } while (0)
+
+#define call_vb_qop(vb, op, args...) \
+ ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+
+#define call_void_vb_qop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ } while (0)
+
+#endif
+
+#define call_bufop(q, op, args...) \
+({ \
+ int ret = 0; \
+ if (q && q->buf_ops && q->buf_ops->op) \
+ ret = q->buf_ops->op(args); \
+ ret; \
+})
+
+bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
+int vb2_verify_memory_type(struct vb2_queue *q,
+ enum vb2_memory memory, unsigned int type);
+#endif /* _MEDIA_VIDEOBUF2_INTERNAL_H */
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 48c6a49c4928..dbec5923fcf0 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/file.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
/**
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
new file mode 100644
index 000000000000..27b4b9e7c0c2
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -0,0 +1,1661 @@
+/*
+ * videobuf2-v4l2.c - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "videobuf2-internal.h"
+
+/* Flags that are set by the vb2 core */
+#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+ V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
+ V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_TIMESTAMP_MASK)
+/* Output buffer flags that should be passed on to the driver */
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+
+/**
+ * __verify_planes_array() - verify that the planes array passed in struct
+ * v4l2_buffer from userspace can be safely used
+ */
+static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
+ return 0;
+
+ /* Is memory for copying plane information present? */
+ if (NULL == b->m.planes) {
+ dprintk(1, "multi-planar buffer passed but "
+ "planes array not provided\n");
+ return -EINVAL;
+ }
+
+ if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
+ dprintk(1, "incorrect planes array length, "
+ "expected %d, got %d\n", vb->num_planes, b->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+ */
+static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ unsigned int length;
+ unsigned int bytesused;
+ unsigned int plane;
+
+ if (!V4L2_TYPE_IS_OUTPUT(b->type))
+ return 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ length = (b->memory == VB2_MEMORY_USERPTR ||
+ b->memory == VB2_MEMORY_DMABUF)
+ ? b->m.planes[plane].length
+ : vb->planes[plane].length;
+ bytesused = b->m.planes[plane].bytesused
+ ? b->m.planes[plane].bytesused : length;
+
+ if (b->m.planes[plane].bytesused > length)
+ return -EINVAL;
+
+ if (b->m.planes[plane].data_offset > 0 &&
+ b->m.planes[plane].data_offset >= bytesused)
+ return -EINVAL;
+ }
+ } else {
+ length = (b->memory == VB2_MEMORY_USERPTR)
+ ? b->length : vb->planes[0].length;
+
+ if (b->bytesused > length)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __set_timestamp(struct vb2_buffer *vb, const void *pb)
+{
+ const struct v4l2_buffer *b = pb;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (q->is_output) {
+ /*
+ * For output buffers copy the timestamp if needed,
+ * and the timecode field and flag if needed.
+ */
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY)
+ vbuf->timestamp = b->timestamp;
+ vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+ if (b->flags & V4L2_BUF_FLAG_TIMECODE)
+ vbuf->timecode = b->timecode;
+ }
+ return 0;
+};
+
+static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
+{
+ static bool check_once;
+
+ if (check_once)
+ return;
+
+ check_once = true;
+ WARN_ON(1);
+
+ pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ else
+ pr_warn("use the actual size instead.\n");
+}
+
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
+ const char *opname)
+{
+ if (b->type != q->type) {
+ dprintk(1, "%s: invalid buffer type\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "%s: buffer index out of range\n", opname);
+ return -EINVAL;
+ }
+
+ if (q->bufs[b->index] == NULL) {
+ /* Should never happen */
+ dprintk(1, "%s: buffer is NULL\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(1, "%s: invalid memory type\n", opname);
+ return -EINVAL;
+ }
+
+ return __verify_planes_array(q->bufs[b->index], b);
+}
+
+/**
+ * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
+ * returned to userspace
+ */
+static int __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
+{
+ struct v4l2_buffer *b = pb;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ /* Copy back data such as timestamp, flags, etc. */
+ b->index = vb->index;
+ b->type = vb->type;
+ b->memory = vb->memory;
+ b->bytesused = 0;
+
+ b->flags = vbuf->flags;
+ b->field = vbuf->field;
+ b->timestamp = vbuf->timestamp;
+ b->timecode = vbuf->timecode;
+ b->sequence = vbuf->sequence;
+ b->reserved2 = 0;
+ b->reserved = 0;
+
+ if (q->is_multiplanar) {
+ /*
+ * Fill in plane-related data if userspace provided an array
+ * for it. The caller has already verified memory and size.
+ */
+ b->length = vb->num_planes;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct v4l2_plane *pdst = &b->m.planes[plane];
+ struct vb2_plane *psrc = &vb->planes[plane];
+
+ pdst->bytesused = psrc->bytesused;
+ pdst->length = psrc->length;
+ if (q->memory == VB2_MEMORY_MMAP)
+ pdst->m.mem_offset = psrc->m.offset;
+ else if (q->memory == VB2_MEMORY_USERPTR)
+ pdst->m.userptr = psrc->m.userptr;
+ else if (q->memory == VB2_MEMORY_DMABUF)
+ pdst->m.fd = psrc->m.fd;
+ pdst->data_offset = psrc->data_offset;
+ memset(pdst->reserved, 0, sizeof(pdst->reserved));
+ }
+ } else {
+ /*
+ * We use length and offset in v4l2_planes array even for
+ * single-planar buffers, but userspace does not.
+ */
+ b->length = vb->planes[0].length;
+ b->bytesused = vb->planes[0].bytesused;
+ if (q->memory == VB2_MEMORY_MMAP)
+ b->m.offset = vb->planes[0].m.offset;
+ else if (q->memory == VB2_MEMORY_USERPTR)
+ b->m.userptr = vb->planes[0].m.userptr;
+ else if (q->memory == VB2_MEMORY_DMABUF)
+ b->m.fd = vb->planes[0].m.fd;
+ }
+
+ /*
+ * Clear any buffer state related flags.
+ */
+ b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY) {
+ /*
+ * For non-COPY timestamps, drop timestamp source bits
+ * and obtain the timestamp source from the queue.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_QUEUED:
+ case VB2_BUF_STATE_ACTIVE:
+ b->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case VB2_BUF_STATE_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VB2_BUF_STATE_DONE:
+ b->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case VB2_BUF_STATE_PREPARED:
+ b->flags |= V4L2_BUF_FLAG_PREPARED;
+ break;
+ case VB2_BUF_STATE_PREPARING:
+ case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_REQUEUEING:
+ /* nothing */
+ break;
+ }
+
+ if (vb2_buffer_in_use(q, vb))
+ b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+ return 0;
+}
+
+/**
+ * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
+ * v4l2_buffer by the userspace. It also verifies that struct
+ * v4l2_buffer has a valid number of planes.
+ */
+static int __fill_vb2_buffer(struct vb2_buffer *vb,
+ const void *pb, struct vb2_plane *planes)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ const struct v4l2_buffer *b = pb;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ unsigned int plane;
+ int ret;
+
+ ret = __verify_length(vb, b);
+ if (ret < 0) {
+ dprintk(1, "plane parameters verification failed: %d\n", ret);
+ return ret;
+ }
+ if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
+ /*
+ * If the format's field is ALTERNATE, then the buffer's field
+ * should be either TOP or BOTTOM, not ALTERNATE since that
+ * makes no sense. The driver has to know whether the
+ * buffer represents a top or a bottom field in order to
+ * program any DMA correctly. Using ALTERNATE is wrong, since
+ * that just says that it is either a top or a bottom field,
+ * but not which of the two it is.
+ */
+ dprintk(1, "the field is incorrectly set to ALTERNATE "
+ "for an output buffer\n");
+ return -EINVAL;
+ }
+ vbuf->timestamp.tv_sec = 0;
+ vbuf->timestamp.tv_usec = 0;
+ vbuf->sequence = 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ if (b->memory == VB2_MEMORY_USERPTR) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.userptr =
+ b->m.planes[plane].m.userptr;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ }
+ if (b->memory == VB2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ }
+
+ /* Fill in driver-provided information for OUTPUT types */
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Will have to go up to b->length when API starts
+ * accepting variable number of planes.
+ *
+ * If bytesused == 0 for the output buffer, then fall
+ * back to the full buffer size. In that case
+ * userspace clearly never bothered to set it and
+ * it's a safe assumption that they really meant to
+ * use the full plane sizes.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0
+ * as a way to indicate that streaming is finished.
+ * In that case, the driver should use the
+ * allow_zero_bytesused flag to keep old userspace
+ * applications working.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct vb2_plane *pdst = &planes[plane];
+ struct v4l2_plane *psrc = &b->m.planes[plane];
+
+ if (psrc->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pdst->bytesused = psrc->bytesused;
+ else
+ pdst->bytesused = psrc->bytesused ?
+ psrc->bytesused : pdst->length;
+ pdst->data_offset = psrc->data_offset;
+ }
+ }
+ } else {
+ /*
+ * Single-planar buffers do not use planes array,
+ * so fill in relevant v4l2_buffer struct fields instead.
+ * In videobuf we use our internal V4l2_planes struct for
+ * single-planar buffers as well, for simplicity.
+ *
+ * If bytesused == 0 for the output buffer, then fall back
+ * to the full buffer size as that's a sensible default.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+ * a way to indicate that streaming is finished. In that case,
+ * the driver should use the allow_zero_bytesused flag to keep
+ * old userspace applications working.
+ */
+ if (b->memory == VB2_MEMORY_USERPTR) {
+ planes[0].m.userptr = b->m.userptr;
+ planes[0].length = b->length;
+ }
+
+ if (b->memory == VB2_MEMORY_DMABUF) {
+ planes[0].m.fd = b->m.fd;
+ planes[0].length = b->length;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (b->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ planes[0].bytesused = b->bytesused;
+ else
+ planes[0].bytesused = b->bytesused ?
+ b->bytesused : planes[0].length;
+ } else
+ planes[0].bytesused = 0;
+
+ }
+
+ /* Zero flags that the vb2 core handles */
+ vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_internal_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vbuf->field = b->field;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
+ }
+
+ return 0;
+}
+
+static const struct vb2_buf_ops v4l2_buf_ops = {
+ .fill_user_buffer = __fill_v4l2_buffer,
+ .fill_vb2_buffer = __fill_vb2_buffer,
+ .set_timestamp = __set_timestamp,
+};
+
+/**
+ * vb2_querybuf() - query video buffer information
+ * @q: videobuf queue
+ * @b: buffer struct passed from userspace to vidioc_querybuf handler
+ * in driver
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * This function will verify the passed v4l2_buffer structure and fill the
+ * relevant information for the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_querybuf handler in driver.
+ */
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (b->type != q->type) {
+ dprintk(1, "wrong buffer type\n");
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+ vb = q->bufs[b->index];
+ ret = __verify_planes_array(vb, b);
+
+ return ret ? ret : vb2_core_querybuf(q, b->index, b);
+}
+EXPORT_SYMBOL(vb2_querybuf);
+
+/**
+ * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
+ * the memory and type values.
+ * @q: videobuf2 queue
+ * @req: struct passed from userspace to vidioc_reqbufs handler
+ * in driver
+ */
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+ int ret = vb2_verify_memory_type(q, req->memory, req->type);
+
+ return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
+}
+EXPORT_SYMBOL_GPL(vb2_reqbufs);
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_prepare_buf
+ * handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_prepare callback in the driver (if provided), in which
+ * driver-specific buffer initialization can be performed,
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ int ret;
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+
+ return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+
+/**
+ * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
+ * the memory and type values.
+ * @q: videobuf2 queue
+ * @create: creation parameters, passed from userspace to vidioc_create_bufs
+ * handler in driver
+ */
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+ int ret = vb2_verify_memory_type(q, create->memory,
+ create->format.type);
+
+ create->index = q->num_buffers;
+ if (create->count == 0)
+ return ret != -EBUSY ? ret : 0;
+ return ret ? ret : vb2_core_create_bufs(q, create->memory,
+ &create->count, &create->format);
+}
+EXPORT_SYMBOL_GPL(vb2_create_bufs);
+
+static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+
+ return ret ? ret : vb2_core_qbuf(q, b->index, b);
+}
+
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
+ * callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ return vb2_internal_qbuf(q, b);
+}
+EXPORT_SYMBOL_GPL(vb2_qbuf);
+
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
+ bool nonblocking)
+{
+ int ret;
+
+ if (b->type != q->type) {
+ dprintk(1, "invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = vb2_core_dqbuf(q, b, nonblocking);
+
+ if (!ret && !q->is_output &&
+ b->flags & V4L2_BUF_FLAG_LAST)
+ q->last_buffer_dequeued = true;
+
+ return ret;
+}
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_dqbuf(q, b, nonblocking);
+}
+EXPORT_SYMBOL_GPL(vb2_dqbuf);
+
+/**
+ * vb2_streamon - start streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamon handler
+ *
+ * Should be called from vidioc_streamon handler of a driver.
+ * This function:
+ * 1) verifies current state
+ * 2) passes any previously queued buffers to the driver and starts streaming
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamon handler in the driver.
+ */
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_core_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ * This function:
+ * 1) verifies current state,
+ * 2) stops streaming and dequeues any queued buffers, including those previously
+ * passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver
+ */
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_core_streamoff(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamoff);
+
+/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to vidioc_expbuf
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
+ eb->plane, eb->flags);
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+/**
+ * vb2_queue_init() - initialize a videobuf2 queue
+ * @q: videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible for clearing its content and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
+int vb2_queue_init(struct vb2_queue *q)
+{
+ /*
+ * Sanity check
+ */
+ if (WARN_ON(!q) ||
+ WARN_ON(q->timestamp_flags &
+ ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
+ return -EINVAL;
+
+ /* Warn that the driver should choose an appropriate timestamp type */
+ WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
+
+ /* Warn that vb2_memory should match with v4l2_memory */
+ if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
+ || WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
+ || WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
+ return -EINVAL;
+
+ if (q->buf_struct_size == 0)
+ q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
+
+ q->buf_ops = &v4l2_buf_ops;
+ q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+ q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
+
+ return vb2_core_queue_init(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init);
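(Illustrative, not part of this patch: a minimal sketch of how a capture
driver might fill in the mandatory vb2_queue fields before calling
vb2_queue_init(). The driver callbacks, mutex and memops choice below are
hypothetical and only mark the required setup described above.)

	#include <linux/mutex.h>
	#include <linux/string.h>
	#include <media/videobuf2-v4l2.h>
	#include <media/videobuf2-vmalloc.h>

	/* assumed to be defined elsewhere in the hypothetical driver */
	extern const struct vb2_ops mydrv_vb2_ops;
	extern struct mutex mydrv_lock;

	static int mydrv_init_queue(struct vb2_queue *q, void *drv_priv)
	{
		memset(q, 0, sizeof(*q));
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_READ;
		q->drv_priv = drv_priv;
		q->ops = &mydrv_vb2_ops;
		q->mem_ops = &vb2_vmalloc_memops;
		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		q->lock = &mydrv_lock;

		return vb2_queue_init(q);	/* 0 on success, negative errno otherwise */
	}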
+
+static int __vb2_init_fileio(struct vb2_queue *q, int read);
+static int __vb2_cleanup_fileio(struct vb2_queue *q);
+
+/**
+ * vb2_queue_release() - stop streaming, release the queue and free memory
+ * @q: videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
+void vb2_queue_release(struct vb2_queue *q)
+{
+ __vb2_cleanup_fileio(q);
+ vb2_core_queue_release(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_release);
+
+/**
+ * vb2_poll() - implements poll userspace operation
+ * @q: videobuf2 queue
+ * @file: file argument passed to the poll file operation handler
+ * @wait: wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
+ * pending events.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
+unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+{
+ struct video_device *vfd = video_devdata(file);
+ unsigned long req_events = poll_requested_events(wait);
+ struct vb2_buffer *vb = NULL;
+ unsigned int res = 0;
+ unsigned long flags;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ res = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->wait, wait);
+ }
+
+ if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM)))
+ return res;
+ if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM)))
+ return res;
+
+ /*
+ * Start file I/O emulator only if streaming API has not been used yet.
+ */
+ if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
+ if (!q->is_output && (q->io_modes & VB2_READ) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
+ if (__vb2_init_fileio(q, 1))
+ return res | POLLERR;
+ }
+ if (q->is_output && (q->io_modes & VB2_WRITE) &&
+ (req_events & (POLLOUT | POLLWRNORM))) {
+ if (__vb2_init_fileio(q, 0))
+ return res | POLLERR;
+ /*
+ * Write to OUTPUT queue can be done immediately.
+ */
+ return res | POLLOUT | POLLWRNORM;
+ }
+ }
+
+ /*
+ * There is nothing to wait for if the queue isn't streaming, or if the
+ * error flag is set.
+ */
+ if (!vb2_is_streaming(q) || q->error)
+ return res | POLLERR;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ if (q->waiting_for_buffers)
+ return res | POLLERR;
+
+ /*
+ * For output streams you can write as long as there are fewer buffers
+ * queued than there are buffers available.
+ */
+ if (q->is_output && q->queued_count < q->num_buffers)
+ return res | POLLOUT | POLLWRNORM;
+
+ if (list_empty(&q->done_list)) {
+ /*
+ * If the last buffer was dequeued from a capture queue,
+ * return immediately. DQBUF will return -EPIPE.
+ */
+ if (q->last_buffer_dequeued)
+ return res | POLLIN | POLLRDNORM;
+
+ poll_wait(file, &q->done_wq, wait);
+ }
+
+ /*
+ * Take first buffer available for dequeuing.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (!list_empty(&q->done_list))
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (vb && (vb->state == VB2_BUF_STATE_DONE
+ || vb->state == VB2_BUF_STATE_ERROR)) {
+ return (q->is_output) ?
+ res | POLLOUT | POLLWRNORM :
+ res | POLLIN | POLLRDNORM;
+ }
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_poll);
+
+/**
+ * struct vb2_fileio_buf - buffer context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of streaming API. This structure is used for
+ * tracking context related to the buffers.
+ */
+struct vb2_fileio_buf {
+ void *vaddr;
+ unsigned int size;
+ unsigned int pos;
+ unsigned int queued:1;
+};
+
+/**
+ * struct vb2_fileio_data - queue context used by file io emulator
+ *
+ * @cur_index: the index of the buffer currently being read from or
+ * written to. If equal to q->num_buffers then a new buffer
+ * must be dequeued.
+ * @initial_index: in the read() case all buffers are queued up immediately
+ * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
+ * buffers. However, in the write() case no buffers are initially
+ * queued, instead whenever a buffer is full it is queued up by
+ * __vb2_perform_fileio(). Only once all available buffers have
+ * been queued up will __vb2_perform_fileio() start to dequeue
+ * buffers. This means that initially __vb2_perform_fileio()
+ * needs to know what buffer index to use when it is queuing up
+ * the buffers for the first time. That initial index is stored
+ * in this field. Once it is equal to q->num_buffers all
+ * available buffers have been queued and __vb2_perform_fileio()
+ * should start the normal dequeue/queue cycle.
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. For proper operation it requires
+ * this structure to save the driver state between each call of the read
+ * or write function.
+ */
+struct vb2_fileio_data {
+ struct v4l2_requestbuffers req;
+ struct v4l2_plane p;
+ struct v4l2_buffer b;
+ struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
+ unsigned int cur_index;
+ unsigned int initial_index;
+ unsigned int q_count;
+ unsigned int dq_count;
+ unsigned read_once:1;
+ unsigned write_immediately:1;
+};
+
+/**
+ * __vb2_init_fileio() - initialize file io emulator
+ * @q: videobuf2 queue
+ * @read: mode selector (1 means read, 0 means write)
+ */
+static int __vb2_init_fileio(struct vb2_queue *q, int read)
+{
+ struct vb2_fileio_data *fileio;
+ int i, ret;
+ unsigned int count = 0;
+
+ /*
+ * Sanity check
+ */
+ if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
+ (!read && !(q->io_modes & VB2_WRITE))))
+ return -EINVAL;
+
+ /*
+ * Check if device supports mapping buffers to kernel virtual space.
+ */
+ if (!q->mem_ops->vaddr)
+ return -EBUSY;
+
+ /*
+ * Check if streaming api has not been already activated.
+ */
+ if (q->streaming || q->num_buffers > 0)
+ return -EBUSY;
+
+ /*
+ * Start with count 1, driver can increase it in queue_setup()
+ */
+ count = 1;
+
+ dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+ (read) ? "read" : "write", count, q->fileio_read_once,
+ q->fileio_write_immediately);
+
+ fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
+ if (fileio == NULL)
+ return -ENOMEM;
+
+ fileio->read_once = q->fileio_read_once;
+ fileio->write_immediately = q->fileio_write_immediately;
+
+ /*
+ * Request buffers and use MMAP type to force driver
+ * to allocate buffers by itself.
+ */
+ fileio->req.count = count;
+ fileio->req.memory = VB2_MEMORY_MMAP;
+ fileio->req.type = q->type;
+ q->fileio = fileio;
+ ret = vb2_core_reqbufs(q, fileio->req.memory, &fileio->req.count);
+ if (ret)
+ goto err_kfree;
+
+ /*
+ * Check if plane_count is correct
+ * (multiplane buffers are not supported).
+ */
+ if (q->bufs[0]->num_planes != 1) {
+ ret = -EBUSY;
+ goto err_reqbufs;
+ }
+
+ /*
+ * Get kernel address of each buffer.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+ if (fileio->bufs[i].vaddr == NULL) {
+ ret = -EINVAL;
+ goto err_reqbufs;
+ }
+ fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+ }
+
+ /*
+ * Read mode requires pre queuing of all buffers.
+ */
+ if (read) {
+ bool is_multiplanar = q->is_multiplanar;
+
+ /*
+ * Queue all buffers.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ struct v4l2_buffer *b = &fileio->b;
+
+ memset(b, 0, sizeof(*b));
+ b->type = q->type;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ b->m.planes = &fileio->p;
+ b->length = 1;
+ }
+ b->memory = q->memory;
+ b->index = i;
+ ret = vb2_internal_qbuf(q, b);
+ if (ret)
+ goto err_reqbufs;
+ fileio->bufs[i].queued = 1;
+ }
+ /*
+ * All buffers have been queued, so mark that by setting
+ * initial_index to q->num_buffers
+ */
+ fileio->initial_index = q->num_buffers;
+ fileio->cur_index = q->num_buffers;
+ }
+
+ /*
+ * Start streaming.
+ */
+ ret = vb2_core_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
+
+ return ret;
+
+err_reqbufs:
+ fileio->req.count = 0;
+ vb2_core_reqbufs(q, fileio->req.memory, &fileio->req.count);
+
+err_kfree:
+ q->fileio = NULL;
+ kfree(fileio);
+ return ret;
+}
+
+/**
+ * __vb2_cleanup_fileio() - free resources used by the file io emulator
+ * @q: videobuf2 queue
+ */
+static int __vb2_cleanup_fileio(struct vb2_queue *q)
+{
+ struct vb2_fileio_data *fileio = q->fileio;
+
+ if (fileio) {
+ vb2_core_streamoff(q, q->type);
+ q->fileio = NULL;
+ fileio->req.count = 0;
+ vb2_reqbufs(q, &fileio->req);
+ kfree(fileio);
+ dprintk(3, "file io emulator closed\n");
+ }
+ return 0;
+}
+
+/**
+ * __vb2_perform_fileio() - perform a single file io (read or write) operation
+ * @q: videobuf2 queue
+ * @data: pointer to the target userspace buffer
+ * @count: number of bytes to read or write
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ * @read: access mode selector (1 means read, 0 means write)
+ */
+static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock, int read)
+{
+ struct vb2_fileio_data *fileio;
+ struct vb2_fileio_buf *buf;
+ bool is_multiplanar = q->is_multiplanar;
+ /*
+ * When using write() to write data to an output video node the vb2 core
+ * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
+ * else is able to provide this information with the write() operation.
+ */
+ bool set_timestamp = !read &&
+ (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ int ret, index;
+
+ dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
+ read ? "read" : "write", (long)*ppos, count,
+ nonblock ? "non" : "");
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Initialize emulator on first call.
+ */
+ if (!vb2_fileio_is_active(q)) {
+ ret = __vb2_init_fileio(q, read);
+ dprintk(3, "vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ return ret;
+ }
+ fileio = q->fileio;
+
+ /*
+ * Check if we need to dequeue the buffer.
+ */
+ index = fileio->cur_index;
+ if (index >= q->num_buffers) {
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ fileio->b.m.planes = &fileio->p;
+ fileio->b.length = 1;
+ }
+ ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
+ dprintk(5, "vb2_dqbuf result: %d\n", ret);
+ if (ret)
+ return ret;
+ fileio->dq_count += 1;
+
+ fileio->cur_index = index = fileio->b.index;
+ buf = &fileio->bufs[index];
+
+ /*
+ * Get number of bytes filled by the driver
+ */
+ buf->pos = 0;
+ buf->queued = 0;
+ buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+ : vb2_plane_size(q->bufs[index], 0);
+ /* Compensate for data_offset on read in the multiplanar case. */
+ if (is_multiplanar && read &&
+ fileio->b.m.planes[0].data_offset < buf->size) {
+ buf->pos = fileio->b.m.planes[0].data_offset;
+ buf->size -= buf->pos;
+ }
+ } else {
+ buf = &fileio->bufs[index];
+ }
+
+ /*
+ * Limit count on last few bytes of the buffer.
+ */
+ if (buf->pos + count > buf->size) {
+ count = buf->size - buf->pos;
+ dprintk(5, "reducing read count: %zd\n", count);
+ }
+
+ /*
+ * Transfer data to userspace.
+ */
+ dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
+ count, index, buf->pos);
+ if (read)
+ ret = copy_to_user(data, buf->vaddr + buf->pos, count);
+ else
+ ret = copy_from_user(buf->vaddr + buf->pos, data, count);
+ if (ret) {
+ dprintk(3, "error copying data\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Update counters.
+ */
+ buf->pos += count;
+ *ppos += count;
+
+ /*
+ * Queue next buffer if required.
+ */
+ if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
+ /*
+ * Check if this is the last buffer to read.
+ */
+ if (read && fileio->read_once && fileio->dq_count == 1) {
+ dprintk(3, "read limit reached\n");
+ return __vb2_cleanup_fileio(q);
+ }
+
+ /*
+ * Call vb2_qbuf and give buffer to the driver.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ fileio->b.index = index;
+ fileio->b.bytesused = buf->pos;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ fileio->p.bytesused = buf->pos;
+ fileio->b.m.planes = &fileio->p;
+ fileio->b.length = 1;
+ }
+ if (set_timestamp)
+ v4l2_get_timestamp(&fileio->b.timestamp);
+ ret = vb2_internal_qbuf(q, &fileio->b);
+ dprintk(5, "vb2_qbuf result: %d\n", ret);
+ if (ret)
+ return ret;
+
+ /*
+ * Buffer has been queued, update the status
+ */
+ buf->pos = 0;
+ buf->queued = 1;
+ buf->size = vb2_plane_size(q->bufs[index], 0);
+ fileio->q_count += 1;
+ /*
+ * If we are queuing up buffers for the first time, then
+ * increase initial_index by one.
+ */
+ if (fileio->initial_index < q->num_buffers)
+ fileio->initial_index++;
+ /*
+ * The next buffer to use is either a buffer that's going to be
+ * queued for the first time (initial_index < q->num_buffers)
+ * or it is equal to q->num_buffers, meaning that next time
+ * we will need to dequeue a buffer since we've now queued up
+ * all the 'first time' buffers.
+ */
+ fileio->cur_index = fileio->initial_index;
+ }
+
+ /*
+ * Return proper number of bytes processed.
+ */
+ if (ret == 0)
+ ret = count;
+ return ret;
+}
+
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
+}
+EXPORT_SYMBOL_GPL(vb2_read);
+
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, (char __user *) data, count,
+ ppos, nonblocking, 0);
+}
+EXPORT_SYMBOL_GPL(vb2_write);
+
+struct vb2_threadio_data {
+ struct task_struct *thread;
+ vb2_thread_fnc fnc;
+ void *priv;
+ bool stop;
+};
+
+static int vb2_thread(void *data)
+{
+ struct vb2_queue *q = data;
+ struct vb2_threadio_data *threadio = q->threadio;
+ struct vb2_fileio_data *fileio = q->fileio;
+ bool set_timestamp = false;
+ int prequeue = 0;
+ int index = 0;
+ int ret = 0;
+
+ if (q->is_output) {
+ prequeue = q->num_buffers;
+ set_timestamp =
+ (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ }
+
+ set_freezable();
+
+ for (;;) {
+ struct vb2_buffer *vb;
+
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ if (prequeue) {
+ fileio->b.index = index++;
+ prequeue--;
+ } else {
+ call_void_qop(q, wait_finish, q);
+ if (!threadio->stop)
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ call_void_qop(q, wait_prepare, q);
+ dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+ }
+ if (ret || threadio->stop)
+ break;
+ try_to_freeze();
+
+ vb = q->bufs[fileio->b.index];
+ if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
+ if (threadio->fnc(vb, threadio->priv))
+ break;
+ call_void_qop(q, wait_finish, q);
+ if (set_timestamp)
+ v4l2_get_timestamp(&fileio->b.timestamp);
+ if (!threadio->stop)
+ ret = vb2_internal_qbuf(q, &fileio->b);
+ call_void_qop(q, wait_prepare, q);
+ if (ret || threadio->stop)
+ break;
+ }
+
+ /* Hmm, linux becomes *very* unhappy without this ... */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return 0;
+}
+
+/*
+ * This function should not be used for anything else but the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailinglist first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+ const char *thread_name)
+{
+ struct vb2_threadio_data *threadio;
+ int ret = 0;
+
+ if (q->threadio)
+ return -EBUSY;
+ if (vb2_is_busy(q))
+ return -EBUSY;
+ if (WARN_ON(q->fileio))
+ return -EBUSY;
+
+ threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
+ if (threadio == NULL)
+ return -ENOMEM;
+ threadio->fnc = fnc;
+ threadio->priv = priv;
+
+ ret = __vb2_init_fileio(q, !q->is_output);
+ dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ goto nomem;
+ q->threadio = threadio;
+ threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
+ if (IS_ERR(threadio->thread)) {
+ ret = PTR_ERR(threadio->thread);
+ threadio->thread = NULL;
+ goto nothread;
+ }
+ return 0;
+
+nothread:
+ __vb2_cleanup_fileio(q);
+nomem:
+ kfree(threadio);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_start);
+
+int vb2_thread_stop(struct vb2_queue *q)
+{
+ struct vb2_threadio_data *threadio = q->threadio;
+ int err;
+
+ if (threadio == NULL)
+ return 0;
+ threadio->stop = true;
+ /* Wake up all pending sleeps in the thread */
+ vb2_queue_error(q);
+ err = kthread_stop(threadio->thread);
+ __vb2_cleanup_fileio(q);
+ threadio->thread = NULL;
+ kfree(threadio);
+ q->threadio = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_stop);
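(Illustrative, not part of this patch: a hedged sketch of the
videobuf2-dvb-style use case referred to above; new uses should first be
discussed on linux-media as noted. The callback and names are hypothetical;
the thread invokes the callback once per dequeued buffer and a non-zero
return stops it.)

	static int mydvb_frame_cb(struct vb2_buffer *vb, void *priv)
	{
		void *vaddr = vb2_plane_vaddr(vb, 0);
		unsigned long size = vb2_get_plane_payload(vb, 0);

		/* feed the demux (or similar consumer) here */
		(void)vaddr;
		(void)size;
		return 0;	/* non-zero would terminate the thread */
	}

	/* start/stop around streaming of the hypothetical queue */
	int mydvb_start(struct vb2_queue *q, void *priv)
	{
		return vb2_thread_start(q, mydvb_frame_cb, priv, "mydvb");
	}

	void mydvb_stop(struct vb2_queue *q)
	{
		vb2_thread_stop(q);
	}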
+
+/*
+ * The following functions are not part of the vb2 core API, but are helper
+ * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
+ * and struct vb2_ops.
+ * They contain boilerplate code that most if not all drivers have to do
+ * and so they simplify the driver code.
+ */
+
+/* The queue is busy if there is an owner and you are not that owner. */
+static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
+{
+ return vdev->queue->owner && vdev->queue->owner != file->private_data;
+}
+
+/* vb2 ioctl helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
+ /* If count == 0, then the owner has released all buffers and is
+ no longer the owner of the queue. Otherwise we have a new owner. */
+ if (res == 0)
+ vdev->queue->owner = p->count ? file->private_data : NULL;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
+
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory,
+ p->format.type);
+
+ p->index = vdev->queue->num_buffers;
+ /*
+ * If count == 0, then just check if memory and type are valid.
+ * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
+ */
+ if (p->count == 0)
+ return res != -EBUSY ? res : 0;
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ res = vb2_core_create_bufs(vdev->queue, p->memory, &p->count,
+ &p->format);
+ if (res == 0)
+ vdev->queue->owner = file->private_data;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
+
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_prepare_buf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
+
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
+ return vb2_querybuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
+
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_qbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
+
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
+
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_streamon(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
+
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_streamoff(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
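(Illustrative, not part of this patch: how a driver built on these helpers
might plug them into its struct v4l2_ioctl_ops. The querycap handler and the
ops name are hypothetical; the remaining entries map one-to-one to the
helpers above.)

	static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
		.vidioc_querycap	= mydrv_querycap,	/* driver specific */
		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
		.vidioc_querybuf	= vb2_ioctl_querybuf,
		.vidioc_qbuf		= vb2_ioctl_qbuf,
		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
		.vidioc_expbuf		= vb2_ioctl_expbuf,
		.vidioc_streamon	= vb2_ioctl_streamon,
		.vidioc_streamoff	= vb2_ioctl_streamoff,
	};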
+
+/* v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_mmap(vdev->queue, vma);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_mmap);
+
+int _vb2_fop_release(struct file *file, struct mutex *lock)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (lock)
+ mutex_lock(lock);
+ if (file->private_data == vdev->queue->owner) {
+ vb2_queue_release(vdev->queue);
+ vdev->queue->owner = NULL;
+ }
+ if (lock)
+ mutex_unlock(lock);
+ return v4l2_fh_release(file);
+}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+ return _vb2_fop_release(file, lock);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_release);
+
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err = -EBUSY;
+
+ if (!(vdev->queue->io_modes & VB2_WRITE))
+ return -EINVAL;
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev, file))
+ goto exit;
+ err = vb2_write(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (vdev->queue->fileio)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_write);
+
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err = -EBUSY;
+
+ if (!(vdev->queue->io_modes & VB2_READ))
+ return -EINVAL;
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev, file))
+ goto exit;
+ err = vb2_read(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (vdev->queue->fileio)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_read);
+
+unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *q = vdev->queue;
+ struct mutex *lock = q->lock ? q->lock : vdev->lock;
+ unsigned res;
+ void *fileio;
+
+ /*
+ * If this helper doesn't know how to lock, then you shouldn't be using
+ * it but you should write your own.
+ */
+ WARN_ON(!lock);
+
+ if (lock && mutex_lock_interruptible(lock))
+ return POLLERR;
+
+ fileio = q->fileio;
+
+ res = vb2_poll(vdev->queue, file, wait);
+
+ /* If fileio was started, then we have a new queue owner. */
+ if (!fileio && q->fileio)
+ q->owner = file->private_data;
+ if (lock)
+ mutex_unlock(lock);
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_poll);
+
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
+#endif
+
+/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq)
+{
+ mutex_unlock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
+
+void vb2_ops_wait_finish(struct vb2_queue *vq)
+{
+ mutex_lock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
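(Illustrative, not part of this patch: a hedged sketch showing where the
wait helpers and the file-operation helpers above would typically be wired
in, assuming the queue's vq->lock is set as required. queue_setup and
buf_queue are hypothetical driver callbacks.)

	static const struct vb2_ops mydrv_vb2_ops = {
		.queue_setup	= mydrv_queue_setup,	/* driver specific */
		.buf_queue	= mydrv_buf_queue,	/* driver specific */
		.wait_prepare	= vb2_ops_wait_prepare,
		.wait_finish	= vb2_ops_wait_finish,
	};

	static const struct v4l2_file_operations mydrv_fops = {
		.owner		= THIS_MODULE,
		.open		= v4l2_fh_open,
		.release	= vb2_fop_release,
		.read		= vb2_fop_read,
		.poll		= vb2_fop_poll,
		.mmap		= vb2_fop_mmap,
		.unlocked_ioctl	= video_ioctl2,
	};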
+
+MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index ecb8f0c7f025..1c302743a1fd 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 1e845f6d407b..03d62f7b4720 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -93,7 +93,6 @@ MODULE_DEVICE_TABLE(spi, arizona_spi_ids);
static struct spi_driver arizona_spi_driver = {
.driver = {
.name = "arizona",
- .owner = THIS_MODULE,
.pm = &arizona_pm_ops,
.of_match_table = of_match_ptr(arizona_of_match),
},
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 30a296b4e748..6a0f6ec67c6b 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -717,7 +717,6 @@ static struct spi_driver cros_ec_driver_spi = {
.driver = {
.name = "cros-ec-spi",
.of_match_table = of_match_ptr(cros_ec_spi_of_match),
- .owner = THIS_MODULE,
.pm = &cros_ec_spi_pm_ops,
},
.probe = cros_ec_spi_probe,
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index b5de8a6856c0..71b89dd4e8de 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -86,7 +86,6 @@ static struct spi_driver da9052_spi_driver = {
.id_table = da9052_spi_id,
.driver = {
.name = "da9052",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index b279205659a4..542b47c6bcd2 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -513,7 +513,6 @@ static struct spi_driver ezxpcap_driver = {
.remove = ezx_pcap_remove,
.driver = {
.name = "ezx-pcap",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 58a170e45d88..cbc1e5ed599c 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -177,7 +177,6 @@ static struct spi_driver mc13xxx_spi_driver = {
.id_table = mc13xxx_device_id,
.driver = {
.name = "mc13xxx",
- .owner = THIS_MODULE,
.of_match_table = mc13xxx_dt_ids,
},
.probe = mc13xxx_spi_probe,
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 618ba244d98a..f8b14ab8b9d7 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -135,7 +135,6 @@ static struct spi_driver stmpe_spi_driver = {
.driver = {
.name = "stmpe-spi",
.of_match_table = of_match_ptr(stmpe_spi_of_match),
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &stmpe_dev_pm_ops,
#endif
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 5de95c265c1a..4a174cdb50b6 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/mutex.h>
+#include <linux/regmap.h>
#include <linux/gpio.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -25,73 +25,18 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
-int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&tps6105x->lock);
- if (ret)
- return ret;
- ret = i2c_smbus_write_byte_data(tps6105x->client, reg, value);
- mutex_unlock(&tps6105x->lock);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(tps6105x_set);
-
-int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&tps6105x->lock);
- if (ret)
- return ret;
- ret = i2c_smbus_read_byte_data(tps6105x->client, reg);
- mutex_unlock(&tps6105x->lock);
- if (ret < 0)
- return ret;
-
- *buf = ret;
- return 0;
-}
-EXPORT_SYMBOL(tps6105x_get);
-
-/*
- * Masks off the bits in the mask and sets the bits in the bitvalues
- * parameter in one atomic operation
- */
-int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg,
- u8 bitmask, u8 bitvalues)
-{
- int ret;
- u8 regval;
-
- ret = mutex_lock_interruptible(&tps6105x->lock);
- if (ret)
- return ret;
- ret = i2c_smbus_read_byte_data(tps6105x->client, reg);
- if (ret < 0)
- goto fail;
- regval = ret;
- regval = (~bitmask & regval) | (bitmask & bitvalues);
- ret = i2c_smbus_write_byte_data(tps6105x->client, reg, regval);
-fail:
- mutex_unlock(&tps6105x->lock);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(tps6105x_mask_and_set);
+static struct regmap_config tps6105x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS6105X_REG_3,
+};
static int tps6105x_startup(struct tps6105x *tps6105x)
{
int ret;
- u8 regval;
+ unsigned int regval;
- ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ ret = regmap_read(tps6105x->regmap, TPS6105X_REG_0, &regval);
if (ret)
return ret;
switch (regval >> TPS6105X_REG0_MODE_SHIFT) {
@@ -145,11 +90,14 @@ static int tps6105x_probe(struct i2c_client *client,
if (!tps6105x)
return -ENOMEM;
+ tps6105x->regmap = devm_regmap_init_i2c(client, &tps6105x_regmap_config);
+ if (IS_ERR(tps6105x->regmap))
+ return PTR_ERR(tps6105x->regmap);
+
i2c_set_clientdata(client, tps6105x);
tps6105x->client = client;
pdata = dev_get_platdata(&client->dev);
tps6105x->pdata = pdata;
- mutex_init(&tps6105x->lock);
ret = tps6105x_startup(tps6105x);
if (ret) {
@@ -198,7 +146,7 @@ static int tps6105x_remove(struct i2c_client *client)
mfd_remove_devices(&client->dev);
/* Put chip in shutdown mode */
- tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_MODE_MASK,
TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
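(Illustrative, not part of this patch: after the regmap conversion above,
code that previously used the removed tps6105x_set()/tps6105x_get()/
tps6105x_mask_and_set() helpers would call the regmap API directly, via the
regmap added to struct tps6105x in this patch. A hedged sketch; the function
name is hypothetical.)

	#include <linux/regmap.h>
	#include <linux/mfd/tps6105x.h>

	static int tps6105x_example_access(struct tps6105x *tps6105x)
	{
		unsigned int regval;
		int ret;

		/* was: tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0, mask, val); */
		ret = regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
					 TPS6105X_REG0_MODE_MASK,
					 TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
		if (ret)
			return ret;

		/* was: tps6105x_get(tps6105x, TPS6105X_REG_0, &regval); */
		return regmap_read(tps6105x->regmap, TPS6105X_REG_0, &regval);
	}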
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index de60ad98bd9f..d59aa55b1495 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -111,7 +111,6 @@ static int tps65912_spi_remove(struct spi_device *spi)
static struct spi_driver tps65912_spi_driver = {
.driver = {
.name = "tps65912",
- .owner = THIS_MODULE,
},
.probe = tps65912_spi_probe,
.remove = tps65912_spi_remove,
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index b8a5e3b34ec7..80482aeb246a 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -96,7 +96,6 @@ MODULE_DEVICE_TABLE(spi, wm831x_spi_ids);
static struct spi_driver wm831x_spi_driver = {
.driver = {
.name = "wm831x",
- .owner = THIS_MODULE,
.pm = &wm831x_spi_pm,
},
.id_table = wm831x_spi_ids,
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index f4c82eafa8e5..39a7f517ee7e 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -132,7 +132,6 @@ MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
static struct spi_driver ad_dpot_spi_driver = {
.driver = {
.name = "ad_dpot",
- .owner = THIS_MODULE,
},
.probe = ad_dpot_spi_probe,
.remove = ad_dpot_spi_remove,
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c
index 864ecac32373..17ecbf95ff15 100644
--- a/drivers/misc/bmp085-spi.c
+++ b/drivers/misc/bmp085-spi.c
@@ -64,7 +64,6 @@ MODULE_DEVICE_TABLE(spi, bmp085_id);
static struct spi_driver bmp085_spi_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = BMP085_NAME,
.of_match_table = bmp085_of_match
},
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 0a1af93ec638..f850ef556bcc 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -462,7 +462,6 @@ MODULE_DEVICE_TABLE(of, at25_of_match);
static struct spi_driver at25_driver = {
.driver = {
.name = "at25",
- .owner = THIS_MODULE,
.of_match_table = at25_of_match,
},
.probe = at25_probe,
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index a6bd9e3fe9d3..ff63f05edc76 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -370,7 +370,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
static struct spi_driver eeprom_93xx46_driver = {
.driver = {
.name = "93xx46",
- .owner = THIS_MODULE,
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index c544f1f50f52..626fdcaf2510 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -235,7 +235,6 @@ MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
static struct spi_driver lattice_ecp3_driver = {
.driver = {
.name = "lattice-ecp3",
- .owner = THIS_MODULE,
},
.probe = lattice_ecp3_probe,
.remove = lattice_ecp3_remove,
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index b2f6e1651ac9..e575475123c8 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -138,7 +138,6 @@ static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &lis3lv02d_spi_pm,
.of_match_table = of_match_ptr(lis302dl_spi_dt_ids),
},
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
index cb0289b44a17..f5456fb7d773 100644
--- a/drivers/misc/ti_dac7512.c
+++ b/drivers/misc/ti_dac7512.c
@@ -89,7 +89,6 @@ MODULE_DEVICE_TABLE(of, dac7512_of_match);
static struct spi_driver dac7512_driver = {
.driver = {
.name = "dac7512",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(dac7512_of_match),
},
.probe = dac7512_probe,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 8ee11f4120fc..1c1b45ef3faf 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1516,7 +1516,6 @@ MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
- .owner = THIS_MODULE,
.of_match_table = mmc_spi_of_match_table,
},
.probe = mmc_spi_probe,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9cd3631170ef..fcf171a1325e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -304,7 +304,6 @@ MODULE_DEVICE_TABLE(of, m25p_of_table);
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .owner = THIS_MODULE,
.of_match_table = m25p_of_table,
},
.id_table = m25p_ids,
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index df6f61137376..70c16399e8ba 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -911,7 +911,6 @@ static int dataflash_remove(struct spi_device *spi)
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(dataflash_dt_ids),
},
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c63ecbcad0b7..18febf74404d 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -417,7 +417,6 @@ static int sst25l_remove(struct spi_device *spi)
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .owner = THIS_MODULE,
},
.probe = sst25l_probe,
.remove = sst25l_remove,
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index b7e83c212023..575790e8a75a 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1243,7 +1243,6 @@ static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
- .owner = THIS_MODULE,
.of_match_table = mcp251x_of_match,
.pm = &mcp251x_can_pm_ops,
},
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 60f43ec22175..1edc973df4c4 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1607,7 +1607,6 @@ static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
.of_match_table = ks8851_match_table,
- .owner = THIS_MODULE,
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe,
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index b1b5f66b8b69..86ea17e7ba7b 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1633,7 +1633,6 @@ static int enc28j60_remove(struct spi_device *spi)
static struct spi_driver enc28j60_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = enc28j60_probe,
.remove = enc28j60_remove,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 2f87909f5186..ddb2c6c6ec94 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -974,7 +974,6 @@ MODULE_DEVICE_TABLE(spi, qca_spi_id);
static struct spi_driver qca_spi_driver = {
.driver = {
.name = QCASPI_DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = qca_spi_of_match,
},
.id_table = qca_spi_id,
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index de6e4fa2d6aa..0fbbba7a0cae 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1816,7 +1816,6 @@ static struct spi_driver at86rf230_driver = {
.driver = {
.of_match_table = of_match_ptr(at86rf230_of_match),
.name = "at86rf230",
- .owner = THIS_MODULE,
},
.probe = at86rf230_probe,
.remove = at86rf230_remove,
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index c5b54a15fc4c..e65b60591317 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -1152,7 +1152,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
static struct spi_driver cc2520_driver = {
.driver = {
.name = "cc2520",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(cc2520_of_ids),
},
.id_table = cc2520_ids,
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index aca0fb3cccbf..4cdf51638972 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -1382,7 +1382,6 @@ static struct spi_driver mrf24j40_driver = {
.driver = {
.of_match_table = of_match_ptr(mrf24j40_of_match),
.name = "mrf24j40",
- .owner = THIS_MODULE,
},
.id_table = mrf24j40_ids,
.probe = mrf24j40_probe,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f091d691cf6f..c72c42206850 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -343,7 +343,6 @@ static int ks8995_remove(struct spi_device *spi)
static struct spi_driver ks8995_driver = {
.driver = {
.name = "spi-ks8995",
- .owner = THIS_MODULE,
},
.probe = ks8995_probe,
.remove = ks8995_remove,
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 29185aeccba8..a740083634d8 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -467,7 +467,6 @@ static struct spi_driver spi_driver = {
.remove = cw1200_spi_disconnect,
.driver = {
.name = "cw1200_wlan_spi",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &cw1200_pm_ops,
#endif
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index f11728a866ff..82c0796377aa 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1283,7 +1283,6 @@ static struct spi_driver libertas_spi_driver = {
.remove = libertas_spi_remove,
.driver = {
.name = "libertas_spi",
- .owner = THIS_MODULE,
.pm = &if_spi_pm_ops,
},
};
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 63de5eed25cf..7ab2f43ab425 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -705,7 +705,6 @@ static int p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
- .owner = THIS_MODULE,
},
.probe = p54spi_probe,
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 735be5352143..8de9d4444a6a 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -354,7 +354,6 @@ static int wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = wl1251_spi_probe,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index f1ac2839d97c..236b41090827 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -408,7 +408,6 @@ static int wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
- .owner = THIS_MODULE,
},
.probe = wl1271_probe,
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index cf7ad8121e11..d6519bb9dba5 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -384,7 +384,6 @@ MODULE_DEVICE_TABLE(of, of_st_nci_spi_match);
static struct spi_driver st_nci_spi_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = ST_NCI_SPI_DRIVER_NAME,
.of_match_table = of_match_ptr(of_st_nci_spi_match),
},
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 123aa981c9d8..f857feb2b573 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2230,7 +2230,6 @@ static struct spi_driver trf7970a_spi_driver = {
.driver = {
.name = "trf7970a",
.of_match_table = of_match_ptr(trf7970a_of_match),
- .owner = THIS_MODULE,
.pm = &trf7970a_pm_ops,
},
};
diff --git a/drivers/power/88pm860x_battery.c b/drivers/power/88pm860x_battery.c
index d49579b227ec..63c57dc82ac1 100644
--- a/drivers/power/88pm860x_battery.c
+++ b/drivers/power/88pm860x_battery.c
@@ -954,47 +954,33 @@ static int pm860x_battery_probe(struct platform_device *pdev)
else
info->resistor = 300; /* set default internal resistor */
- info->battery = power_supply_register(&pdev->dev, &pm860x_battery_desc,
- NULL);
+ info->battery = devm_power_supply_register(&pdev->dev,
+ &pm860x_battery_desc,
+ NULL);
if (IS_ERR(info->battery))
return PTR_ERR(info->battery);
info->battery->dev.parent = &pdev->dev;
- ret = request_threaded_irq(info->irq_cc, NULL,
- pm860x_coulomb_handler, IRQF_ONESHOT,
- "coulomb", info);
+ ret = devm_request_threaded_irq(chip->dev, info->irq_cc, NULL,
+ pm860x_coulomb_handler, IRQF_ONESHOT,
+ "coulomb", info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
info->irq_cc, ret);
- goto out_reg;
+ return ret;
}
- ret = request_threaded_irq(info->irq_batt, NULL, pm860x_batt_handler,
- IRQF_ONESHOT, "battery", info);
+ ret = devm_request_threaded_irq(chip->dev, info->irq_batt, NULL,
+ pm860x_batt_handler,
+ IRQF_ONESHOT, "battery", info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
info->irq_batt, ret);
- goto out_coulomb;
+ return ret;
}
return 0;
-
-out_coulomb:
- free_irq(info->irq_cc, info);
-out_reg:
- power_supply_unregister(info->battery);
- return ret;
-}
-
-static int pm860x_battery_remove(struct platform_device *pdev)
-{
- struct pm860x_battery_info *info = platform_get_drvdata(pdev);
-
- free_irq(info->irq_batt, info);
- free_irq(info->irq_cc, info);
- power_supply_unregister(info->battery);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1028,7 +1014,6 @@ static struct platform_driver pm860x_battery_driver = {
.pm = &pm860x_battery_pm_ops,
},
.probe = pm860x_battery_probe,
- .remove = pm860x_battery_remove,
};
module_platform_driver(pm860x_battery_driver);
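
The 88pm860x hunks above switch probe() to managed (devm_*) resources, which is what lets the goto-based unwinding and the whole remove() callback go away. A stripped-down sketch of the same pattern with hypothetical example_* names, assuming only the standard devm helpers:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	power_supply_changed(data);
	return IRQ_HANDLED;
}

static const struct power_supply_desc example_desc = {
	.name = "example-battery",
	.type = POWER_SUPPLY_TYPE_BATTERY,
};

static int example_battery_probe(struct platform_device *pdev)
{
	struct power_supply *psy;
	int irq, ret;

	/* Unregistered automatically when the device is unbound */
	psy = devm_power_supply_register(&pdev->dev, &example_desc, NULL);
	if (IS_ERR(psy))
		return PTR_ERR(psy);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Freed automatically as well, so no free_irq() on any error path */
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					example_irq_handler, IRQF_ONESHOT,
					"example-battery", psy);
	if (ret < 0)
		return ret;

	return 0;
}
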
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index f8758d6febf8..02b3b313809a 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -157,26 +157,25 @@ config BATTERY_SBS
Say Y to include support for SBS battery driver for SBS-compliant
gas gauges.
-config BATTERY_BQ27x00
- tristate "BQ27x00 battery driver"
- depends on I2C || I2C=n
+config BATTERY_BQ27XXX
+ tristate "BQ27xxx battery driver"
help
- Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
+ Say Y here to enable support for batteries with BQ27xxx (I2C/HDQ) chips.
-config BATTERY_BQ27X00_I2C
- bool "BQ27200/BQ27500 support"
- depends on BATTERY_BQ27x00
+config BATTERY_BQ27XXX_I2C
+ bool "BQ27xxx I2C support"
+ depends on BATTERY_BQ27XXX
depends on I2C
default y
help
- Say Y here to enable support for batteries with BQ27x00 (I2C) chips.
+ Say Y here to enable support for batteries with BQ27xxx (I2C) chips.
-config BATTERY_BQ27X00_PLATFORM
- bool "BQ27000 support"
- depends on BATTERY_BQ27x00
+config BATTERY_BQ27XXX_PLATFORM
+ bool "BQ27xxx HDQ support"
+ depends on BATTERY_BQ27XXX
default y
help
- Say Y here to enable support for batteries with BQ27000 (HDQ) chips.
+ Say Y here to enable support for batteries with BQ27xxx (HDQ) chips.
config BATTERY_DA9030
tristate "DA9030 battery driver"
@@ -313,7 +312,7 @@ config CHARGER_MAX8903
config CHARGER_TWL4030
tristate "OMAP TWL4030 BCI charger driver"
- depends on TWL4030_CORE
+ depends on IIO && TWL4030_CORE
help
Say Y here to enable support for TWL4030 Battery Charge Interface.
@@ -379,6 +378,18 @@ config CHARGER_MAX8998
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8998/LP3974 PMICs.
+config CHARGER_QCOM_SMBB
+ tristate "Qualcomm Switch-Mode Battery Charger and Boost"
+ depends on MFD_SPMI_PMIC || COMPILE_TEST
+ depends on OF
+ help
+ Say Y to include support for the Switch-Mode Battery Charger and
+ Boost (SMBB) hardware found in Qualcomm PM8941 PMICs. The charger
+ is an integrated, single-cell lithium-ion battery charger. DT
+ configuration is required for loading; see the devicetree
+ documentation for more detail. The base name for this driver is
+ 'pm8941_charger'.
+
config CHARGER_BQ2415X
tristate "TI BQ2415x battery charger driver"
depends on I2C
@@ -397,12 +408,13 @@ config CHARGER_BQ24190
Say Y to enable support for the TI BQ24190 battery charger.
config CHARGER_BQ24257
- tristate "TI BQ24257 battery charger driver"
+ tristate "TI BQ24250/24251/24257 battery charger driver"
depends on I2C
depends on GPIOLIB || COMPILE_TEST
depends on REGMAP_I2C
help
- Say Y to enable support for the TI BQ24257 battery charger.
+ Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery
+ chargers.
config CHARGER_BQ24735
tristate "TI BQ24735 battery charger support"
@@ -434,6 +446,13 @@ config CHARGER_TPS65090
Say Y here to enable support for battery charging with TPS65090
PMIC chips.
+config CHARGER_TPS65217
+ tristate "TPS65217 battery charger driver"
+ depends on MFD_TPS65217
+ help
+ Say Y here to enable support for battery charging with TPS65217
+ PMIC chips.
+
config BATTERY_GAUGE_LTC2941
tristate "LTC2941/LTC2943 Battery Gauge Driver"
depends on I2C
@@ -472,6 +491,13 @@ config CHARGER_RT9455
help
Say Y to enable support for Richtek RT9455 battery charger.
+config AXP20X_POWER
+ tristate "AXP20x power supply driver"
+ depends on MFD_AXP20X
+ help
+ This driver provides support for the power supply features of
+ AXP20x PMICs.
+
source "drivers/power/reset/Kconfig"
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 5752ce818f51..b0e1bf190e3d 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
obj-$(CONFIG_PDA_POWER) += pda_power.o
obj-$(CONFIG_APM_POWER) += apm_power.o
+obj-$(CONFIG_AXP20X_POWER) += axp20x_usb_power.o
obj-$(CONFIG_MAX8925_POWER) += max8925_power.o
obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
@@ -29,7 +30,7 @@ obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
-obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
+obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o
obj-$(CONFIG_CHARGER_DA9150) += da9150-charger.o
@@ -57,6 +58,7 @@ obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
obj-$(CONFIG_CHARGER_BQ24257) += bq24257_charger.o
@@ -65,6 +67,7 @@ obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
+obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_POWER_RESET) += reset/
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
diff --git a/drivers/power/axp20x_usb_power.c b/drivers/power/axp20x_usb_power.c
new file mode 100644
index 000000000000..421a90b83567
--- /dev/null
+++ b/drivers/power/axp20x_usb_power.c
@@ -0,0 +1,248 @@
+/*
+ * AXP20x PMIC USB power supply status driver
+ *
+ * Copyright (C) 2015 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2014 Bruno Prémont <bonbons@linux-vserver.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define DRVNAME "axp20x-usb-power-supply"
+
+#define AXP20X_PWR_STATUS_VBUS_PRESENT BIT(5)
+#define AXP20X_PWR_STATUS_VBUS_USED BIT(4)
+
+#define AXP20X_USB_STATUS_VBUS_VALID BIT(2)
+
+#define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000)
+#define AXP20X_VBUS_CLIMIT_MASK 3
+#define AXP20X_VBUC_CLIMIT_900mA 0
+#define AXP20X_VBUC_CLIMIT_500mA 1
+#define AXP20X_VBUC_CLIMIT_100mA 2
+#define AXP20X_VBUC_CLIMIT_NONE 3
+
+#define AXP20X_ADC_EN1_VBUS_CURR BIT(2)
+#define AXP20X_ADC_EN1_VBUS_VOLT BIT(3)
+
+#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+
+struct axp20x_usb_power {
+ struct regmap *regmap;
+ struct power_supply *supply;
+};
+
+static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
+{
+ struct axp20x_usb_power *power = devid;
+
+ power_supply_changed(power->supply);
+
+ return IRQ_HANDLED;
+}
+
+static int axp20x_usb_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+ unsigned int input, v;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+ if (ret)
+ return ret;
+
+ val->intval = AXP20X_VBUS_VHOLD_uV(v);
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = axp20x_read_variable_width(power->regmap,
+ AXP20X_VBUS_V_ADC_H, 12);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 1700; /* 1 step = 1.7 mV */
+ return 0;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP20X_VBUC_CLIMIT_100mA:
+ val->intval = 100000;
+ break;
+ case AXP20X_VBUC_CLIMIT_500mA:
+ val->intval = 500000;
+ break;
+ case AXP20X_VBUC_CLIMIT_900mA:
+ val->intval = 900000;
+ break;
+ case AXP20X_VBUC_CLIMIT_NONE:
+ val->intval = -1;
+ break;
+ }
+ return 0;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = axp20x_read_variable_width(power->regmap,
+ AXP20X_VBUS_I_ADC_H, 12);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 375; /* 1 step = 0.375 mA */
+ return 0;
+ default:
+ break;
+ }
+
+ /* All the properties below need the input-status reg value */
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &input);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (!(input & AXP20X_PWR_STATUS_VBUS_PRESENT)) {
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ break;
+ }
+
+ ret = regmap_read(power->regmap, AXP20X_USB_OTG_STATUS, &v);
+ if (ret)
+ return ret;
+
+ if (!(v & AXP20X_USB_STATUS_VBUS_VALID)) {
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ }
+
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = !!(input & AXP20X_PWR_STATUS_VBUS_PRESENT);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = !!(input & AXP20X_PWR_STATUS_VBUS_USED);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property axp20x_usb_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static const struct power_supply_desc axp20x_usb_power_desc = {
+ .name = "axp20x-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = axp20x_usb_power_properties,
+ .num_properties = ARRAY_SIZE(axp20x_usb_power_properties),
+ .get_property = axp20x_usb_power_get_property,
+};
+
+static int axp20x_usb_power_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct power_supply_config psy_cfg = {};
+ struct axp20x_usb_power *power;
+ static const char * const irq_names[] = { "VBUS_PLUGIN",
+ "VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID" };
+ int i, irq, ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+
+ power->regmap = axp20x->regmap;
+
+ /* Enable vbus valid checking */
+ ret = regmap_update_bits(power->regmap, AXP20X_VBUS_MON,
+ AXP20X_VBUS_MON_VBUS_VALID, AXP20X_VBUS_MON_VBUS_VALID);
+ if (ret)
+ return ret;
+
+ /* Enable vbus voltage and current measurement */
+ ret = regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
+ AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT,
+ AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT);
+ if (ret)
+ return ret;
+
+ psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.drv_data = power;
+
+ power->supply = devm_power_supply_register(&pdev->dev,
+ &axp20x_usb_power_desc, &psy_cfg);
+ if (IS_ERR(power->supply))
+ return PTR_ERR(power->supply);
+
+ /* Request irqs after registering, as irqs may trigger immediately */
+ for (i = 0; i < ARRAY_SIZE(irq_names); i++) {
+ irq = platform_get_irq_byname(pdev, irq_names[i]);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
+ irq_names[i], irq);
+ continue;
+ }
+ irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+ ret = devm_request_any_context_irq(&pdev->dev, irq,
+ axp20x_usb_power_irq, 0, DRVNAME, power);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
+ irq_names[i], ret);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id axp20x_usb_power_match[] = {
+ { .compatible = "x-powers,axp202-usb-power-supply" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
+
+static struct platform_driver axp20x_usb_power_driver = {
+ .probe = axp20x_usb_power_probe,
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = axp20x_usb_power_match,
+ },
+};
+
+module_platform_driver(axp20x_usb_power_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("AXP20x PMIC USB power supply status driver");
+MODULE_LICENSE("GPL");
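
For reference, the register-to-microvolt/microamp conversions in the new axp20x driver are plain fixed-step arithmetic. A self-contained illustration with made-up raw values (it mirrors, but is not copied from, axp20x_usb_power_get_property() above):

/* Standalone illustration of the scalings used by the driver above. */
#include <stdio.h>

#define AXP20X_VBUS_VHOLD_uV(b)	(4000000 + (((b) >> 3) & 7) * 100000)

int main(void)
{
	unsigned int reg = 0x28;	/* VHOLD field = (0x28 >> 3) & 7 = 5 */
	int adc_volt_raw = 2941;	/* hypothetical 12-bit ADC readings */
	int adc_curr_raw = 1200;

	printf("VHOLD     = %u uV\n", AXP20X_VBUS_VHOLD_uV(reg));	/* 4500000 */
	printf("VBUS volt = %d uV\n", adc_volt_raw * 1700);		/* 1.7 mV per step */
	printf("VBUS curr = %d uA\n", adc_curr_raw * 375);		/* 0.375 mA per step */
	return 0;
}
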
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index ec212b5be755..4afd76848bce 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1704,7 +1704,7 @@ error_4:
error_3:
bq2415x_power_supply_exit(bq);
error_2:
- if (bq->notify_node)
+ if (bq && bq->notify_node)
of_node_put(bq->notify_node);
kfree(name);
error_1:
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index 469a452cbe10..f5746b9f4e83 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -1543,5 +1543,4 @@ module_i2c_driver(bq24190_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
-MODULE_ALIAS("i2c:bq24190-charger");
MODULE_DESCRIPTION("TI BQ24190 Charger Driver");
diff --git a/drivers/power/bq24257_charger.c b/drivers/power/bq24257_charger.c
index 5859bc7c1616..1fea2c7ef97f 100644
--- a/drivers/power/bq24257_charger.c
+++ b/drivers/power/bq24257_charger.c
@@ -13,6 +13,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
+ * Datasheets:
+ * http://www.ti.com/product/bq24250
+ * http://www.ti.com/product/bq24251
+ * http://www.ti.com/product/bq24257
*/
#include <linux/module.h>
@@ -36,18 +40,33 @@
#define BQ24257_REG_7 0x06
#define BQ24257_MANUFACTURER "Texas Instruments"
-#define BQ24257_STAT_IRQ "stat"
#define BQ24257_PG_GPIO "pg"
#define BQ24257_ILIM_SET_DELAY 1000 /* msec */
+/*
+ * When adding support for new devices make sure that enum bq2425x_chip and
+ * bq2425x_chip_name[] always stay in sync!
+ */
+enum bq2425x_chip {
+ BQ24250,
+ BQ24251,
+ BQ24257,
+};
+
+static const char *const bq2425x_chip_name[] = {
+ "bq24250",
+ "bq24251",
+ "bq24257",
+};
+
enum bq24257_fields {
F_WD_FAULT, F_WD_EN, F_STAT, F_FAULT, /* REG 1 */
F_RESET, F_IILIMIT, F_EN_STAT, F_EN_TERM, F_CE, F_HZ_MODE, /* REG 2 */
F_VBAT, F_USB_DET, /* REG 3 */
F_ICHG, F_ITERM, /* REG 4 */
F_LOOP_STATUS, F_LOW_CHG, F_DPDM_EN, F_CE_STATUS, F_VINDPM, /* REG 5 */
- F_X2_TMR_EN, F_TMR, F_SYSOFF, F_TS_STAT, /* REG 6 */
+ F_X2_TMR_EN, F_TMR, F_SYSOFF, F_TS_EN, F_TS_STAT, /* REG 6 */
F_VOVP, F_CLR_VDP, F_FORCE_BATDET, F_FORCE_PTM, /* REG 7 */
F_MAX_FIELDS
@@ -58,6 +77,9 @@ struct bq24257_init_data {
u8 ichg; /* charge current */
u8 vbat; /* regulation voltage */
u8 iterm; /* termination current */
+ u8 iilimit; /* input current limit */
+ u8 vovp; /* over voltage protection voltage */
+ u8 vindpm; /* VINDPM input threshold voltage */
};
struct bq24257_state {
@@ -71,6 +93,8 @@ struct bq24257_device {
struct device *dev;
struct power_supply *charger;
+ enum bq2425x_chip chip;
+
struct regmap *rmap;
struct regmap_field *rmap_fields[F_MAX_FIELDS];
@@ -82,6 +106,8 @@ struct bq24257_device {
struct bq24257_state state;
struct mutex lock; /* protect state data */
+
+ bool iilimit_autoset_enable;
};
static bool bq24257_is_volatile_reg(struct device *dev, unsigned int reg)
@@ -135,6 +161,7 @@ static const struct reg_field bq24257_reg_fields[] = {
[F_X2_TMR_EN] = REG_FIELD(BQ24257_REG_6, 7, 7),
[F_TMR] = REG_FIELD(BQ24257_REG_6, 5, 6),
[F_SYSOFF] = REG_FIELD(BQ24257_REG_6, 4, 4),
+ [F_TS_EN] = REG_FIELD(BQ24257_REG_6, 3, 3),
[F_TS_STAT] = REG_FIELD(BQ24257_REG_6, 0, 2),
/* REG 7 */
[F_VOVP] = REG_FIELD(BQ24257_REG_7, 5, 7),
@@ -169,6 +196,26 @@ static const u32 bq24257_iterm_map[] = {
#define BQ24257_ITERM_MAP_SIZE ARRAY_SIZE(bq24257_iterm_map)
+static const u32 bq24257_iilimit_map[] = {
+ 100000, 150000, 500000, 900000, 1500000, 2000000
+};
+
+#define BQ24257_IILIMIT_MAP_SIZE ARRAY_SIZE(bq24257_iilimit_map)
+
+static const u32 bq24257_vovp_map[] = {
+ 6000000, 6500000, 7000000, 8000000, 9000000, 9500000, 10000000,
+ 10500000
+};
+
+#define BQ24257_VOVP_MAP_SIZE ARRAY_SIZE(bq24257_vovp_map)
+
+static const u32 bq24257_vindpm_map[] = {
+ 4200000, 4280000, 4360000, 4440000, 4520000, 4600000, 4680000,
+ 4760000
+};
+
+#define BQ24257_VINDPM_MAP_SIZE ARRAY_SIZE(bq24257_vindpm_map)
+
static int bq24257_field_read(struct bq24257_device *bq,
enum bq24257_fields field_id)
{
@@ -220,6 +267,47 @@ enum bq24257_fault {
FAULT_INPUT_LDO_LOW,
};
+static int bq24257_get_input_current_limit(struct bq24257_device *bq,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = bq24257_field_read(bq, F_IILIMIT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The "External ILIM" and "Production & Test" modes are not exposed
+ * through this driver and are not covered by the lookup table.
+ * Should such a mode be active, return an error rather
+ * than exceeding the bounds of the lookup table and returning
+ * garbage.
+ */
+ if (ret >= BQ24257_IILIMIT_MAP_SIZE)
+ return -ENODATA;
+
+ val->intval = bq24257_iilimit_map[ret];
+
+ return 0;
+}
+
+static int bq24257_set_input_current_limit(struct bq24257_device *bq,
+ const union power_supply_propval *val)
+{
+ /*
+ * Address the case where the user manually sets an input current limit
+ * while the charger auto-detection mechanism is active. In this
+ * case we want to abort and go straight to the user-specified value.
+ */
+ if (bq->iilimit_autoset_enable)
+ cancel_delayed_work_sync(&bq->iilimit_setup_work);
+
+ return bq24257_field_write(bq, F_IILIMIT,
+ bq24257_find_idx(val->intval,
+ bq24257_iilimit_map,
+ BQ24257_IILIMIT_MAP_SIZE));
+}
+
static int bq24257_power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -249,6 +337,10 @@ static int bq24257_power_supply_get_property(struct power_supply *psy,
val->strval = BQ24257_MANUFACTURER;
break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bq2425x_chip_name[bq->chip];
+ break;
+
case POWER_SUPPLY_PROP_ONLINE:
val->intval = state.power_good;
break;
@@ -300,6 +392,9 @@ static int bq24257_power_supply_get_property(struct power_supply *psy,
val->intval = bq24257_iterm_map[bq->init_data.iterm];
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return bq24257_get_input_current_limit(bq, val);
+
default:
return -EINVAL;
}
@@ -307,6 +402,31 @@ static int bq24257_power_supply_get_property(struct power_supply *psy,
return 0;
}
+static int bq24257_power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ struct bq24257_device *bq = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return bq24257_set_input_current_limit(bq, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bq24257_power_supply_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int bq24257_get_chip_state(struct bq24257_device *bq,
struct bq24257_state *state)
{
@@ -324,7 +444,26 @@ static int bq24257_get_chip_state(struct bq24257_device *bq,
state->fault = ret;
- state->power_good = !gpiod_get_value_cansleep(bq->pg);
+ if (bq->pg)
+ state->power_good = !gpiod_get_value_cansleep(bq->pg);
+ else
+ /*
+ * If we have a chip without a dedicated power-good GPIO or
+ * some other explicit bit that would provide this information,
+ * assume the power is good if there is no supply-related fault,
+ * and not good otherwise. Other faults could mask the fact that
+ * the power is actually not good, but this is probably the best
+ * we can do here.
+ */
+ switch (state->fault) {
+ case FAULT_INPUT_OVP:
+ case FAULT_INPUT_UVLO:
+ case FAULT_INPUT_LDO_LOW:
+ state->power_good = false;
+ break;
+ default:
+ state->power_good = true;
+ }
return 0;
}
@@ -361,6 +500,28 @@ enum bq24257_in_ilimit {
IILIMIT_NONE,
};
+enum bq24257_vovp {
+ VOVP_6000,
+ VOVP_6500,
+ VOVP_7000,
+ VOVP_8000,
+ VOVP_9000,
+ VOVP_9500,
+ VOVP_10000,
+ VOVP_10500
+};
+
+enum bq24257_vindpm {
+ VINDPM_4200,
+ VINDPM_4280,
+ VINDPM_4360,
+ VINDPM_4440,
+ VINDPM_4520,
+ VINDPM_4600,
+ VINDPM_4680,
+ VINDPM_4760
+};
+
enum bq24257_port_type {
PORT_TYPE_DCP, /* Dedicated Charging Port */
PORT_TYPE_CDP, /* Charging Downstream Port */
@@ -449,41 +610,43 @@ static void bq24257_handle_state_change(struct bq24257_device *bq,
{
int ret;
struct bq24257_state old_state;
- bool reset_iilimit = false;
- bool config_iilimit = false;
mutex_lock(&bq->lock);
old_state = bq->state;
mutex_unlock(&bq->lock);
- if (!new_state->power_good) { /* power removed */
- cancel_delayed_work_sync(&bq->iilimit_setup_work);
-
- /* activate D+/D- port detection algorithm */
- ret = bq24257_field_write(bq, F_DPDM_EN, 1);
+ /*
+ * Handle BQ2425x state changes, observing whether the D+/D- based input
+ * current limit autoset functionality is enabled.
+ */
+ if (!new_state->power_good) {
+ dev_dbg(bq->dev, "Power removed\n");
+ if (bq->iilimit_autoset_enable) {
+ cancel_delayed_work_sync(&bq->iilimit_setup_work);
+
+ /* activate D+/D- port detection algorithm */
+ ret = bq24257_field_write(bq, F_DPDM_EN, 1);
+ if (ret < 0)
+ goto error;
+ }
+ /*
+ * When power is removed, always return to the default input
+ * current limit as configured during probe.
+ */
+ ret = bq24257_field_write(bq, F_IILIMIT, bq->init_data.iilimit);
if (ret < 0)
goto error;
+ } else if (!old_state.power_good) {
+ dev_dbg(bq->dev, "Power inserted\n");
- reset_iilimit = true;
- } else if (!old_state.power_good) { /* power inserted */
- config_iilimit = true;
- } else if (new_state->fault == FAULT_NO_BAT) { /* battery removed */
- cancel_delayed_work_sync(&bq->iilimit_setup_work);
-
- reset_iilimit = true;
- } else if (old_state.fault == FAULT_NO_BAT) { /* battery connected */
- config_iilimit = true;
- } else if (new_state->fault == FAULT_TIMER) { /* safety timer expired */
- dev_err(bq->dev, "Safety timer expired! Battery dead?\n");
- }
-
- if (reset_iilimit) {
- ret = bq24257_field_write(bq, F_IILIMIT, IILIMIT_500);
- if (ret < 0)
- goto error;
- } else if (config_iilimit) {
- schedule_delayed_work(&bq->iilimit_setup_work,
+ if (bq->iilimit_autoset_enable)
+ /* configure input current limit */
+ schedule_delayed_work(&bq->iilimit_setup_work,
msecs_to_jiffies(BQ24257_ILIM_SET_DELAY));
+ } else if (new_state->fault == FAULT_NO_BAT) {
+ dev_warn(bq->dev, "Battery removed\n");
+ } else if (new_state->fault == FAULT_TIMER) {
+ dev_err(bq->dev, "Safety timer expired! Battery dead?\n");
}
return;
@@ -531,7 +694,9 @@ static int bq24257_hw_init(struct bq24257_device *bq)
} init_data[] = {
{F_ICHG, bq->init_data.ichg},
{F_VBAT, bq->init_data.vbat},
- {F_ITERM, bq->init_data.iterm}
+ {F_ITERM, bq->init_data.iterm},
+ {F_VOVP, bq->init_data.vovp},
+ {F_VINDPM, bq->init_data.vindpm},
};
/*
@@ -558,7 +723,16 @@ static int bq24257_hw_init(struct bq24257_device *bq)
bq->state = state;
mutex_unlock(&bq->lock);
- if (!state.power_good)
+ if (!bq->iilimit_autoset_enable) {
+ dev_dbg(bq->dev, "manually setting iilimit = %u\n",
+ bq->init_data.iilimit);
+
+ /* program fixed input current limit */
+ ret = bq24257_field_write(bq, F_IILIMIT,
+ bq->init_data.iilimit);
+ if (ret < 0)
+ return ret;
+ } else if (!state.power_good)
/* activate D+/D- detection algorithm */
ret = bq24257_field_write(bq, F_DPDM_EN, 1);
else if (state.fault != FAULT_NO_BAT)
@@ -569,6 +743,7 @@ static int bq24257_hw_init(struct bq24257_device *bq)
static enum power_supply_property bq24257_power_supply_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
@@ -577,6 +752,7 @@ static enum power_supply_property bq24257_power_supply_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
static char *bq24257_charger_supplied_to[] = {
@@ -589,45 +765,127 @@ static const struct power_supply_desc bq24257_power_supply_desc = {
.properties = bq24257_power_supply_props,
.num_properties = ARRAY_SIZE(bq24257_power_supply_props),
.get_property = bq24257_power_supply_get_property,
+ .set_property = bq24257_power_supply_set_property,
+ .property_is_writeable = bq24257_power_supply_property_is_writeable,
};
-static int bq24257_power_supply_init(struct bq24257_device *bq)
+static ssize_t bq24257_show_ovp_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct power_supply_config psy_cfg = { .drv_data = bq, };
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24257_device *bq = power_supply_get_drvdata(psy);
- psy_cfg.supplied_to = bq24257_charger_supplied_to;
- psy_cfg.num_supplicants = ARRAY_SIZE(bq24257_charger_supplied_to);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ bq24257_vovp_map[bq->init_data.vovp]);
+}
- bq->charger = power_supply_register(bq->dev, &bq24257_power_supply_desc,
- &psy_cfg);
- if (IS_ERR(bq->charger))
- return PTR_ERR(bq->charger);
+static ssize_t bq24257_show_in_dpm_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24257_device *bq = power_supply_get_drvdata(psy);
- return 0;
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ bq24257_vindpm_map[bq->init_data.vindpm]);
}
-static int bq24257_irq_probe(struct bq24257_device *bq)
+static ssize_t bq24257_sysfs_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct gpio_desc *stat_irq;
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24257_device *bq = power_supply_get_drvdata(psy);
+ int ret;
- stat_irq = devm_gpiod_get_index(bq->dev, BQ24257_STAT_IRQ, 0, GPIOD_IN);
- if (IS_ERR(stat_irq)) {
- dev_err(bq->dev, "could not probe stat_irq pin\n");
- return PTR_ERR(stat_irq);
- }
+ if (strcmp(attr->attr.name, "high_impedance_enable") == 0)
+ ret = bq24257_field_read(bq, F_HZ_MODE);
+ else if (strcmp(attr->attr.name, "sysoff_enable") == 0)
+ ret = bq24257_field_read(bq, F_SYSOFF);
+ else
+ return -EINVAL;
- return gpiod_to_irq(stat_irq);
+ if (ret < 0)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
-static int bq24257_pg_gpio_probe(struct bq24257_device *bq)
+static ssize_t bq24257_sysfs_set_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
- bq->pg = devm_gpiod_get_index(bq->dev, BQ24257_PG_GPIO, 0, GPIOD_IN);
- if (IS_ERR(bq->pg)) {
- dev_err(bq->dev, "could not probe PG pin\n");
- return PTR_ERR(bq->pg);
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24257_device *bq = power_supply_get_drvdata(psy);
+ long val;
+ int ret;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "high_impedance_enable") == 0)
+ ret = bq24257_field_write(bq, F_HZ_MODE, (bool)val);
+ else if (strcmp(attr->attr.name, "sysoff_enable") == 0)
+ ret = bq24257_field_write(bq, F_SYSOFF, (bool)val);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(ovp_voltage, S_IRUGO, bq24257_show_ovp_voltage, NULL);
+static DEVICE_ATTR(in_dpm_voltage, S_IRUGO, bq24257_show_in_dpm_voltage, NULL);
+static DEVICE_ATTR(high_impedance_enable, S_IWUSR | S_IRUGO,
+ bq24257_sysfs_show_enable, bq24257_sysfs_set_enable);
+static DEVICE_ATTR(sysoff_enable, S_IWUSR | S_IRUGO,
+ bq24257_sysfs_show_enable, bq24257_sysfs_set_enable);
+
+static struct attribute *bq24257_charger_attr[] = {
+ &dev_attr_ovp_voltage.attr,
+ &dev_attr_in_dpm_voltage.attr,
+ &dev_attr_high_impedance_enable.attr,
+ &dev_attr_sysoff_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group bq24257_attr_group = {
+ .attrs = bq24257_charger_attr,
+};
+
+static int bq24257_power_supply_init(struct bq24257_device *bq)
+{
+ struct power_supply_config psy_cfg = { .drv_data = bq, };
+
+ psy_cfg.supplied_to = bq24257_charger_supplied_to;
+ psy_cfg.num_supplicants = ARRAY_SIZE(bq24257_charger_supplied_to);
+
+ bq->charger = devm_power_supply_register(bq->dev,
+ &bq24257_power_supply_desc,
+ &psy_cfg);
+
+ return PTR_ERR_OR_ZERO(bq->charger);
+}
+
+static void bq24257_pg_gpio_probe(struct bq24257_device *bq)
+{
+ bq->pg = devm_gpiod_get_optional(bq->dev, BQ24257_PG_GPIO, GPIOD_IN);
+
+ if (PTR_ERR(bq->pg) == -EPROBE_DEFER) {
+ dev_info(bq->dev, "probe retry requested for PG pin\n");
+ return;
+ } else if (IS_ERR(bq->pg)) {
+ dev_err(bq->dev, "error probing PG pin\n");
+ bq->pg = NULL;
+ return;
}
- return 0;
+ if (bq->pg)
+ dev_dbg(bq->dev, "probed PG pin = %d\n", desc_to_gpio(bq->pg));
}
static int bq24257_fw_probe(struct bq24257_device *bq)
@@ -635,6 +893,7 @@ static int bq24257_fw_probe(struct bq24257_device *bq)
int ret;
u32 property;
+ /* Required properties */
ret = device_property_read_u32(bq->dev, "ti,charge-current", &property);
if (ret < 0)
return ret;
@@ -658,6 +917,43 @@ static int bq24257_fw_probe(struct bq24257_device *bq)
bq->init_data.iterm = bq24257_find_idx(property, bq24257_iterm_map,
BQ24257_ITERM_MAP_SIZE);
+ /* Optional properties. If not provided use reasonable default. */
+ ret = device_property_read_u32(bq->dev, "ti,current-limit",
+ &property);
+ if (ret < 0) {
+ bq->iilimit_autoset_enable = true;
+
+ /*
+ * Explicitly set a default value which will be needed for
+ * devices that don't support the automatic setting of the input
+ * current limit through the charger type detection mechanism.
+ */
+ bq->init_data.iilimit = IILIMIT_500;
+ } else
+ bq->init_data.iilimit =
+ bq24257_find_idx(property,
+ bq24257_iilimit_map,
+ BQ24257_IILIMIT_MAP_SIZE);
+
+ ret = device_property_read_u32(bq->dev, "ti,ovp-voltage",
+ &property);
+ if (ret < 0)
+ bq->init_data.vovp = VOVP_6500;
+ else
+ bq->init_data.vovp = bq24257_find_idx(property,
+ bq24257_vovp_map,
+ BQ24257_VOVP_MAP_SIZE);
+
+ ret = device_property_read_u32(bq->dev, "ti,in-dpm-voltage",
+ &property);
+ if (ret < 0)
+ bq->init_data.vindpm = VINDPM_4360;
+ else
+ bq->init_data.vindpm =
+ bq24257_find_idx(property,
+ bq24257_vindpm_map,
+ BQ24257_VINDPM_MAP_SIZE);
+
return 0;
}
@@ -666,6 +962,7 @@ static int bq24257_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct device *dev = &client->dev;
+ const struct acpi_device_id *acpi_id;
struct bq24257_device *bq;
int ret;
int i;
@@ -682,6 +979,18 @@ static int bq24257_probe(struct i2c_client *client,
bq->client = client;
bq->dev = dev;
+ if (ACPI_HANDLE(dev)) {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table,
+ &client->dev);
+ if (!acpi_id) {
+ dev_err(dev, "Failed to match ACPI device\n");
+ return -ENODEV;
+ }
+ bq->chip = (enum bq2425x_chip)acpi_id->driver_data;
+ } else {
+ bq->chip = (enum bq2425x_chip)id->driver_data;
+ }
+
mutex_init(&bq->lock);
bq->rmap = devm_regmap_init_i2c(client, &bq24257_regmap_config);
@@ -703,8 +1012,6 @@ static int bq24257_probe(struct i2c_client *client,
i2c_set_clientdata(client, bq);
- INIT_DELAYED_WORK(&bq->iilimit_setup_work, bq24257_iilimit_setup_work);
-
if (!dev->platform_data) {
ret = bq24257_fw_probe(bq);
if (ret < 0) {
@@ -715,10 +1022,31 @@ static int bq24257_probe(struct i2c_client *client,
return -ENODEV;
}
- /* we can only check Power Good status by probing the PG pin */
- ret = bq24257_pg_gpio_probe(bq);
- if (ret < 0)
- return ret;
+ /*
+ * The BQ24250 doesn't support the D+/D- based charger type detection
+ * used for the automatic setting of the input current limit, so
+ * explicitly disable that feature.
+ */
+ if (bq->chip == BQ24250)
+ bq->iilimit_autoset_enable = false;
+
+ if (bq->iilimit_autoset_enable)
+ INIT_DELAYED_WORK(&bq->iilimit_setup_work,
+ bq24257_iilimit_setup_work);
+
+ /*
+ * The BQ24250 doesn't have a dedicated Power Good (PG) pin so let's
+ * not probe for it and instead use a SW-based approach to determine
+ * the PG state. We also use a SW-based approach for all other devices
+ * if the PG pin is either not defined or can't be probed.
+ */
+ if (bq->chip != BQ24250)
+ bq24257_pg_gpio_probe(bq);
+
+ if (PTR_ERR(bq->pg) == -EPROBE_DEFER)
+ return PTR_ERR(bq->pg);
+ else if (!bq->pg)
+ dev_info(bq->dev, "using SW-based power-good detection\n");
/* reset all registers to defaults */
ret = bq24257_field_write(bq, F_RESET, 1);
@@ -740,36 +1068,39 @@ static int bq24257_probe(struct i2c_client *client,
return ret;
}
- if (client->irq <= 0)
- client->irq = bq24257_irq_probe(bq);
-
- if (client->irq < 0) {
- dev_err(dev, "no irq resource found\n");
- return client->irq;
- }
-
ret = devm_request_threaded_irq(dev, client->irq, NULL,
bq24257_irq_handler_thread,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- BQ24257_STAT_IRQ, bq);
- if (ret)
+ bq2425x_chip_name[bq->chip], bq);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ #%d\n", client->irq);
return ret;
+ }
ret = bq24257_power_supply_init(bq);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Failed to register power supply\n");
+ return ret;
+ }
- return ret;
+ ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group);
+ if (ret < 0) {
+ dev_err(dev, "Can't create sysfs entries\n");
+ return ret;
+ }
+
+ return 0;
}
static int bq24257_remove(struct i2c_client *client)
{
struct bq24257_device *bq = i2c_get_clientdata(client);
- cancel_delayed_work_sync(&bq->iilimit_setup_work);
+ if (bq->iilimit_autoset_enable)
+ cancel_delayed_work_sync(&bq->iilimit_setup_work);
- power_supply_unregister(bq->charger);
+ sysfs_remove_group(&bq->charger->dev.kobj, &bq24257_attr_group);
bq24257_field_write(bq, F_RESET, 1); /* reset to defaults */
@@ -782,7 +1113,8 @@ static int bq24257_suspend(struct device *dev)
struct bq24257_device *bq = dev_get_drvdata(dev);
int ret = 0;
- cancel_delayed_work_sync(&bq->iilimit_setup_work);
+ if (bq->iilimit_autoset_enable)
+ cancel_delayed_work_sync(&bq->iilimit_setup_work);
/* reset all registers to default (and activate standalone mode) */
ret = bq24257_field_write(bq, F_RESET, 1);
@@ -823,19 +1155,25 @@ static const struct dev_pm_ops bq24257_pm = {
};
static const struct i2c_device_id bq24257_i2c_ids[] = {
- { "bq24257", 0 },
+ { "bq24250", BQ24250 },
+ { "bq24251", BQ24251 },
+ { "bq24257", BQ24257 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq24257_i2c_ids);
static const struct of_device_id bq24257_of_match[] = {
+ { .compatible = "ti,bq24250", },
+ { .compatible = "ti,bq24251", },
{ .compatible = "ti,bq24257", },
{ },
};
MODULE_DEVICE_TABLE(of, bq24257_of_match);
static const struct acpi_device_id bq24257_acpi_match[] = {
- {"BQ242570", 0},
+ { "BQ242500", BQ24250 },
+ { "BQ242510", BQ24251 },
+ { "BQ242570", BQ24257 },
{},
};
MODULE_DEVICE_TABLE(acpi, bq24257_acpi_match);
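
The bq24257 additions lean on regmap bit-field helpers: each F_* enum maps to a REG_FIELD() descriptor, and the bq24257_field_read()/bq24257_field_write() wrappers ultimately call regmap_field_read()/regmap_field_write(). A minimal sketch of that API; the register address and field width below are placeholders, not values from the datasheet:

#include <linux/regmap.h>

/* REG_FIELD(reg, lsb, msb) describes one contiguous bit field in a register */
static const struct reg_field example_field = REG_FIELD(0x06, 0, 2);

static int example_use_field(struct device *dev, struct regmap *rmap)
{
	struct regmap_field *f;
	unsigned int val;
	int ret;

	f = devm_regmap_field_alloc(dev, rmap, example_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	ret = regmap_field_read(f, &val);	/* reads only the 3-bit field */
	if (ret < 0)
		return ret;

	return regmap_field_write(f, 0x3);	/* writes only the 3-bit field */
}
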
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
deleted file mode 100644
index 8287261fd978..000000000000
--- a/drivers/power/bq27x00_battery.c
+++ /dev/null
@@ -1,1129 +0,0 @@
-/*
- * BQ27x00 battery driver
- *
- * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
- * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
- * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
- * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
- *
- * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Datasheets:
- * http://focus.ti.com/docs/prod/folders/print/bq27000.html
- * http://focus.ti.com/docs/prod/folders/print/bq27500.html
- * http://www.ti.com/product/bq27425-g1
- * http://www.ti.com/product/BQ27742-G1
- * http://www.ti.com/product/BQ27510-G3
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/param.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/idr.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <asm/unaligned.h>
-
-#include <linux/power/bq27x00_battery.h>
-
-#define DRIVER_VERSION "1.2.0"
-
-#define BQ27XXX_MANUFACTURER "Texas Instruments"
-
-#define BQ27x00_REG_TEMP 0x06
-#define BQ27x00_REG_VOLT 0x08
-#define BQ27x00_REG_AI 0x14
-#define BQ27x00_REG_FLAGS 0x0A
-#define BQ27x00_REG_TTE 0x16
-#define BQ27x00_REG_TTF 0x18
-#define BQ27x00_REG_TTECP 0x26
-#define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */
-#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
-#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
-#define BQ27x00_REG_AE 0x22 /* Available energy */
-#define BQ27x00_POWER_AVG 0x24
-
-#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
-#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
-#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */
-#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */
-#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */
-#define BQ27000_FLAG_FC BIT(5)
-#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */
-
-#define BQ27500_REG_SOC 0x2C
-#define BQ27500_REG_DCAP 0x3C /* Design capacity */
-#define BQ27500_FLAG_DSC BIT(0)
-#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
-#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
-#define BQ27500_FLAG_FC BIT(9)
-#define BQ27500_FLAG_OTC BIT(15)
-
-#define BQ27742_POWER_AVG 0x76
-
-#define BQ27510_REG_SOC 0x20
-#define BQ27510_REG_DCAP 0x2E /* Design capacity */
-#define BQ27510_REG_CYCT 0x1E /* Cycle count total */
-
-/* bq27425 register addresses are same as bq27x00 addresses minus 4 */
-#define BQ27425_REG_OFFSET 0x04
-#define BQ27425_REG_SOC (0x1C + BQ27425_REG_OFFSET)
-#define BQ27425_REG_DCAP (0x3C + BQ27425_REG_OFFSET)
-
-#define BQ27000_RS 20 /* Resistor sense */
-#define BQ27x00_POWER_CONSTANT (256 * 29200 / 1000)
-
-struct bq27x00_device_info;
-struct bq27x00_access_methods {
- int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
-};
-
-enum bq27x00_chip { BQ27000, BQ27500, BQ27425, BQ27742, BQ27510};
-
-struct bq27x00_reg_cache {
- int temperature;
- int time_to_empty;
- int time_to_empty_avg;
- int time_to_full;
- int charge_full;
- int cycle_count;
- int capacity;
- int energy;
- int flags;
- int power_avg;
- int health;
-};
-
-struct bq27x00_device_info {
- struct device *dev;
- int id;
- enum bq27x00_chip chip;
-
- struct bq27x00_reg_cache cache;
- int charge_design_full;
-
- unsigned long last_update;
- struct delayed_work work;
-
- struct power_supply *bat;
-
- struct bq27x00_access_methods bus;
-
- struct mutex lock;
-};
-
-static enum power_supply_property bq27x00_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CAPACITY_LEVEL,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
- POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
- POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_CYCLE_COUNT,
- POWER_SUPPLY_PROP_ENERGY_NOW,
- POWER_SUPPLY_PROP_POWER_AVG,
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_MANUFACTURER,
-};
-
-static enum power_supply_property bq27425_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CAPACITY_LEVEL,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_MANUFACTURER,
-};
-
-static enum power_supply_property bq27742_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CAPACITY_LEVEL,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_CYCLE_COUNT,
- POWER_SUPPLY_PROP_POWER_AVG,
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_MANUFACTURER,
-};
-
-static enum power_supply_property bq27510_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CAPACITY_LEVEL,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_CYCLE_COUNT,
- POWER_SUPPLY_PROP_POWER_AVG,
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_MANUFACTURER,
-};
-
-static unsigned int poll_interval = 360;
-module_param(poll_interval, uint, 0644);
-MODULE_PARM_DESC(poll_interval,
- "battery poll interval in seconds - 0 disables polling");
-
-/*
- * Common code for BQ27x00 devices
- */
-
-static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
- bool single)
-{
- if (di->chip == BQ27425)
- return di->bus.read(di, reg - BQ27425_REG_OFFSET, single);
- return di->bus.read(di, reg, single);
-}
-
-/*
- * Higher versions of the chip like BQ27425 and BQ27500
- * differ from BQ27000 and BQ27200 in calculation of certain
- * parameters. Hence we need to check for the chip type.
- */
-static bool bq27xxx_is_chip_version_higher(struct bq27x00_device_info *di)
-{
- if (di->chip == BQ27425 || di->chip == BQ27500 || di->chip == BQ27742
- || di->chip == BQ27510)
- return true;
- return false;
-}
-
-/*
- * Return the battery Relative State-of-Charge
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
-{
- int rsoc;
-
- if (di->chip == BQ27500 || di->chip == BQ27742)
- rsoc = bq27x00_read(di, BQ27500_REG_SOC, false);
- else if (di->chip == BQ27510)
- rsoc = bq27x00_read(di, BQ27510_REG_SOC, false);
- else if (di->chip == BQ27425)
- rsoc = bq27x00_read(di, BQ27425_REG_SOC, false);
- else
- rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
-
- if (rsoc < 0)
- dev_dbg(di->dev, "error reading relative State-of-Charge\n");
-
- return rsoc;
-}
-
-/*
- * Return a battery charge value in µAh
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
-{
- int charge;
-
- charge = bq27x00_read(di, reg, false);
- if (charge < 0) {
- dev_dbg(di->dev, "error reading charge register %02x: %d\n",
- reg, charge);
- return charge;
- }
-
- if (bq27xxx_is_chip_version_higher(di))
- charge *= 1000;
- else
- charge = charge * 3570 / BQ27000_RS;
-
- return charge;
-}
-
-/*
- * Return the battery Nominal available capaciy in µAh
- * Or < 0 if something fails.
- */
-static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di)
-{
- int flags;
- bool is_bq27500 = di->chip == BQ27500;
- bool is_bq27742 = di->chip == BQ27742;
- bool is_higher = bq27xxx_is_chip_version_higher(di);
- bool flags_1b = !(is_bq27500 || is_bq27742);
-
- flags = bq27x00_read(di, BQ27x00_REG_FLAGS, flags_1b);
- if (flags >= 0 && !is_higher && (flags & BQ27000_FLAG_CI))
- return -ENODATA;
-
- return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC);
-}
-
-/*
- * Return the battery Last measured discharge in µAh
- * Or < 0 if something fails.
- */
-static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di)
-{
- return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD);
-}
-
-/*
- * Return the battery Initial last measured discharge in µAh
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
-{
- int ilmd;
-
- if (bq27xxx_is_chip_version_higher(di)) {
- if (di->chip == BQ27425)
- ilmd = bq27x00_read(di, BQ27425_REG_DCAP, false);
- else if (di->chip == BQ27510)
- ilmd = bq27x00_read(di, BQ27510_REG_DCAP, false);
- else
- ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
- } else {
- ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
- }
-
- if (ilmd < 0) {
- dev_dbg(di->dev, "error reading initial last measured discharge\n");
- return ilmd;
- }
-
- if (bq27xxx_is_chip_version_higher(di))
- ilmd *= 1000;
- else
- ilmd = ilmd * 256 * 3570 / BQ27000_RS;
-
- return ilmd;
-}
-
-/*
- * Return the battery Available energy in µWh
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
-{
- int ae;
-
- ae = bq27x00_read(di, BQ27x00_REG_AE, false);
- if (ae < 0) {
- dev_dbg(di->dev, "error reading available energy\n");
- return ae;
- }
-
- if (di->chip == BQ27500)
- ae *= 1000;
- else
- ae = ae * 29200 / BQ27000_RS;
-
- return ae;
-}
-
-/*
- * Return the battery temperature in tenths of degree Kelvin
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
-{
- int temp;
-
- temp = bq27x00_read(di, BQ27x00_REG_TEMP, false);
- if (temp < 0) {
- dev_err(di->dev, "error reading temperature\n");
- return temp;
- }
-
- if (!bq27xxx_is_chip_version_higher(di))
- temp = 5 * temp / 2;
-
- return temp;
-}
-
-/*
- * Return the battery Cycle count total
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di)
-{
- int cyct;
-
- if (di->chip == BQ27510)
- cyct = bq27x00_read(di, BQ27510_REG_CYCT, false);
- else
- cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false);
- if (cyct < 0)
- dev_err(di->dev, "error reading cycle count total\n");
-
- return cyct;
-}
-
-/*
- * Read a time register.
- * Return < 0 if something fails.
- */
-static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
-{
- int tval;
-
- tval = bq27x00_read(di, reg, false);
- if (tval < 0) {
- dev_dbg(di->dev, "error reading time register %02x: %d\n",
- reg, tval);
- return tval;
- }
-
- if (tval == 65535)
- return -ENODATA;
-
- return tval * 60;
-}
-
-/*
- * Read a power avg register.
- * Return < 0 if something fails.
- */
-static int bq27x00_battery_read_pwr_avg(struct bq27x00_device_info *di, u8 reg)
-{
- int tval;
-
- tval = bq27x00_read(di, reg, false);
- if (tval < 0) {
- dev_err(di->dev, "error reading power avg rgister %02x: %d\n",
- reg, tval);
- return tval;
- }
-
- if (di->chip == BQ27500)
- return tval;
- else
- return (tval * BQ27x00_POWER_CONSTANT) / BQ27000_RS;
-}
-
-/*
- * Read flag register.
- * Return < 0 if something fails.
- */
-static int bq27x00_battery_read_health(struct bq27x00_device_info *di)
-{
- int tval;
-
- tval = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
- if (tval < 0) {
- dev_err(di->dev, "error reading flag register:%d\n", tval);
- return tval;
- }
-
- if (di->chip == BQ27500) {
- if (tval & BQ27500_FLAG_SOCF)
- tval = POWER_SUPPLY_HEALTH_DEAD;
- else if (tval & BQ27500_FLAG_OTC)
- tval = POWER_SUPPLY_HEALTH_OVERHEAT;
- else
- tval = POWER_SUPPLY_HEALTH_GOOD;
- return tval;
- } else if (di->chip == BQ27510) {
- if (tval & BQ27500_FLAG_OTC)
- return POWER_SUPPLY_HEALTH_OVERHEAT;
- return POWER_SUPPLY_HEALTH_GOOD;
- } else {
- if (tval & BQ27000_FLAG_EDV1)
- tval = POWER_SUPPLY_HEALTH_DEAD;
- else
- tval = POWER_SUPPLY_HEALTH_GOOD;
- return tval;
- }
-
- return -1;
-}
-
-static void bq27x00_update(struct bq27x00_device_info *di)
-{
- struct bq27x00_reg_cache cache = {0, };
- bool is_bq27500 = di->chip == BQ27500;
- bool is_bq27510 = di->chip == BQ27510;
- bool is_bq27425 = di->chip == BQ27425;
- bool is_bq27742 = di->chip == BQ27742;
- bool flags_1b = !(is_bq27500 || is_bq27742);
-
- cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, flags_1b);
- if ((cache.flags & 0xff) == 0xff)
- /* read error */
- cache.flags = -1;
- if (cache.flags >= 0) {
- if (!is_bq27500 && !is_bq27425 && !is_bq27742 && !is_bq27510
- && (cache.flags & BQ27000_FLAG_CI)) {
- dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
- cache.capacity = -ENODATA;
- cache.energy = -ENODATA;
- cache.time_to_empty = -ENODATA;
- cache.time_to_empty_avg = -ENODATA;
- cache.time_to_full = -ENODATA;
- cache.charge_full = -ENODATA;
- cache.health = -ENODATA;
- } else {
- cache.capacity = bq27x00_battery_read_rsoc(di);
- if (is_bq27742 || is_bq27510)
- cache.time_to_empty =
- bq27x00_battery_read_time(di,
- BQ27x00_REG_TTE);
- else if (!is_bq27425) {
- cache.energy = bq27x00_battery_read_energy(di);
- cache.time_to_empty =
- bq27x00_battery_read_time(di,
- BQ27x00_REG_TTE);
- cache.time_to_empty_avg =
- bq27x00_battery_read_time(di,
- BQ27x00_REG_TTECP);
- cache.time_to_full =
- bq27x00_battery_read_time(di,
- BQ27x00_REG_TTF);
- }
- cache.charge_full = bq27x00_battery_read_lmd(di);
- cache.health = bq27x00_battery_read_health(di);
- }
- cache.temperature = bq27x00_battery_read_temperature(di);
- if (!is_bq27425)
- cache.cycle_count = bq27x00_battery_read_cyct(di);
- if (is_bq27742)
- cache.power_avg =
- bq27x00_battery_read_pwr_avg(di,
- BQ27742_POWER_AVG);
- else
- cache.power_avg =
- bq27x00_battery_read_pwr_avg(di,
- BQ27x00_POWER_AVG);
-
- /* We only have to read charge design full once */
- if (di->charge_design_full <= 0)
- di->charge_design_full = bq27x00_battery_read_ilmd(di);
- }
-
- if (di->cache.capacity != cache.capacity)
- power_supply_changed(di->bat);
-
- if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
- di->cache = cache;
-
- di->last_update = jiffies;
-}
-
-static void bq27x00_battery_poll(struct work_struct *work)
-{
- struct bq27x00_device_info *di =
- container_of(work, struct bq27x00_device_info, work.work);
-
- bq27x00_update(di);
-
- if (poll_interval > 0) {
- /* The timer does not have to be accurate. */
- set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
- schedule_delayed_work(&di->work, poll_interval * HZ);
- }
-}
-
-/*
- * Return the battery average current in µA
- * Note that current can be negative signed as well
- * Or 0 if something fails.
- */
-static int bq27x00_battery_current(struct bq27x00_device_info *di,
- union power_supply_propval *val)
-{
- int curr;
- int flags;
-
- curr = bq27x00_read(di, BQ27x00_REG_AI, false);
- if (curr < 0) {
- dev_err(di->dev, "error reading current\n");
- return curr;
- }
-
- if (bq27xxx_is_chip_version_higher(di)) {
- /* bq27500 returns signed value */
- val->intval = (int)((s16)curr) * 1000;
- } else {
- flags = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
- if (flags & BQ27000_FLAG_CHGS) {
- dev_dbg(di->dev, "negative current!\n");
- curr = -curr;
- }
-
- val->intval = curr * 3570 / BQ27000_RS;
- }
-
- return 0;
-}
-
-static int bq27x00_battery_status(struct bq27x00_device_info *di,
- union power_supply_propval *val)
-{
- int status;
-
- if (bq27xxx_is_chip_version_higher(di)) {
- if (di->cache.flags & BQ27500_FLAG_FC)
- status = POWER_SUPPLY_STATUS_FULL;
- else if (di->cache.flags & BQ27500_FLAG_DSC)
- status = POWER_SUPPLY_STATUS_DISCHARGING;
- else
- status = POWER_SUPPLY_STATUS_CHARGING;
- } else {
- if (di->cache.flags & BQ27000_FLAG_FC)
- status = POWER_SUPPLY_STATUS_FULL;
- else if (di->cache.flags & BQ27000_FLAG_CHGS)
- status = POWER_SUPPLY_STATUS_CHARGING;
- else if (power_supply_am_i_supplied(di->bat))
- status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- else
- status = POWER_SUPPLY_STATUS_DISCHARGING;
- }
-
- val->intval = status;
-
- return 0;
-}
-
-static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
- union power_supply_propval *val)
-{
- int level;
-
- if (bq27xxx_is_chip_version_higher(di)) {
- if (di->cache.flags & BQ27500_FLAG_FC)
- level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
- else if (di->cache.flags & BQ27500_FLAG_SOC1)
- level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
- else if (di->cache.flags & BQ27500_FLAG_SOCF)
- level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
- else
- level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
- } else {
- if (di->cache.flags & BQ27000_FLAG_FC)
- level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
- else if (di->cache.flags & BQ27000_FLAG_EDV1)
- level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
- else if (di->cache.flags & BQ27000_FLAG_EDVF)
- level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
- else
- level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
- }
-
- val->intval = level;
-
- return 0;
-}
-
-/*
- * Return the battery Voltage in millivolts
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
- union power_supply_propval *val)
-{
- int volt;
-
- volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
- if (volt < 0) {
- dev_err(di->dev, "error reading voltage\n");
- return volt;
- }
-
- val->intval = volt * 1000;
-
- return 0;
-}
-
-static int bq27x00_simple_value(int value,
- union power_supply_propval *val)
-{
- if (value < 0)
- return value;
-
- val->intval = value;
-
- return 0;
-}
-
-static int bq27x00_battery_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- int ret = 0;
- struct bq27x00_device_info *di = power_supply_get_drvdata(psy);
-
- mutex_lock(&di->lock);
- if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
- cancel_delayed_work_sync(&di->work);
- bq27x00_battery_poll(&di->work.work);
- }
- mutex_unlock(&di->lock);
-
- if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
- return -ENODEV;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- ret = bq27x00_battery_status(di, val);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = bq27x00_battery_voltage(di, val);
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = di->cache.flags < 0 ? 0 : 1;
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = bq27x00_battery_current(di, val);
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- ret = bq27x00_simple_value(di->cache.capacity, val);
- break;
- case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
- ret = bq27x00_battery_capacity_level(di, val);
- break;
- case POWER_SUPPLY_PROP_TEMP:
- ret = bq27x00_simple_value(di->cache.temperature, val);
- if (ret == 0)
- val->intval -= 2731;
- break;
- case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
- ret = bq27x00_simple_value(di->cache.time_to_empty, val);
- break;
- case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
- ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val);
- break;
- case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
- ret = bq27x00_simple_value(di->cache.time_to_full, val);
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW:
- ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val);
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL:
- ret = bq27x00_simple_value(di->cache.charge_full, val);
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- ret = bq27x00_simple_value(di->charge_design_full, val);
- break;
- case POWER_SUPPLY_PROP_CYCLE_COUNT:
- ret = bq27x00_simple_value(di->cache.cycle_count, val);
- break;
- case POWER_SUPPLY_PROP_ENERGY_NOW:
- ret = bq27x00_simple_value(di->cache.energy, val);
- break;
- case POWER_SUPPLY_PROP_POWER_AVG:
- ret = bq27x00_simple_value(di->cache.power_avg, val);
- break;
- case POWER_SUPPLY_PROP_HEALTH:
- ret = bq27x00_simple_value(di->cache.health, val);
- break;
- case POWER_SUPPLY_PROP_MANUFACTURER:
- val->strval = BQ27XXX_MANUFACTURER;
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static void bq27x00_external_power_changed(struct power_supply *psy)
-{
- struct bq27x00_device_info *di = power_supply_get_drvdata(psy);
-
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
-}
-
-static int bq27x00_powersupply_init(struct bq27x00_device_info *di,
- const char *name)
-{
- int ret;
- struct power_supply_desc *psy_desc;
- struct power_supply_config psy_cfg = { .drv_data = di, };
-
- psy_desc = devm_kzalloc(di->dev, sizeof(*psy_desc), GFP_KERNEL);
- if (!psy_desc)
- return -ENOMEM;
-
- psy_desc->name = name;
- psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
- if (di->chip == BQ27425) {
- psy_desc->properties = bq27425_battery_props;
- psy_desc->num_properties = ARRAY_SIZE(bq27425_battery_props);
- } else if (di->chip == BQ27742) {
- psy_desc->properties = bq27742_battery_props;
- psy_desc->num_properties = ARRAY_SIZE(bq27742_battery_props);
- } else if (di->chip == BQ27510) {
- psy_desc->properties = bq27510_battery_props;
- psy_desc->num_properties = ARRAY_SIZE(bq27510_battery_props);
- } else {
- psy_desc->properties = bq27x00_battery_props;
- psy_desc->num_properties = ARRAY_SIZE(bq27x00_battery_props);
- }
- psy_desc->get_property = bq27x00_battery_get_property;
- psy_desc->external_power_changed = bq27x00_external_power_changed;
-
- INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll);
- mutex_init(&di->lock);
-
- di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
- if (IS_ERR(di->bat)) {
- ret = PTR_ERR(di->bat);
- dev_err(di->dev, "failed to register battery: %d\n", ret);
- return ret;
- }
-
- dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION);
-
- bq27x00_update(di);
-
- return 0;
-}
-
-static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
-{
- /*
- * power_supply_unregister() calls bq27x00_battery_get_property(), which
- * calls bq27x00_battery_poll().
- * Make sure that bq27x00_battery_poll() will not call
- * schedule_delayed_work() again after unregister (which would cause an OOPS).
- */
- poll_interval = 0;
-
- cancel_delayed_work_sync(&di->work);
-
- power_supply_unregister(di->bat);
-
- mutex_destroy(&di->lock);
-}
-
-/* i2c specific code */
-#ifdef CONFIG_BATTERY_BQ27X00_I2C
-
-/* If the system has several batteries we need a different name for each
- * of them...
- */
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_mutex);
-
-static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single)
-{
- struct i2c_client *client = to_i2c_client(di->dev);
- struct i2c_msg msg[2];
- unsigned char data[2];
- int ret;
-
- if (!client->adapter)
- return -ENODEV;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].buf = &reg;
- msg[0].len = sizeof(reg);
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = data;
- if (single)
- msg[1].len = 1;
- else
- msg[1].len = 2;
-
- ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (ret < 0)
- return ret;
-
- if (!single)
- ret = get_unaligned_le16(data);
- else
- ret = data[0];
-
- return ret;
-}
-
-static int bq27x00_battery_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- char *name;
- struct bq27x00_device_info *di;
- int num;
- int retval = 0;
-
- /* Get new ID for the new battery device */
- mutex_lock(&battery_mutex);
- num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
- mutex_unlock(&battery_mutex);
- if (num < 0)
- return num;
-
- name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
- if (!name) {
- retval = -ENOMEM;
- goto batt_failed;
- }
-
- di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- retval = -ENOMEM;
- goto batt_failed;
- }
-
- di->id = num;
- di->dev = &client->dev;
- di->chip = id->driver_data;
- di->bus.read = &bq27x00_read_i2c;
-
- retval = bq27x00_powersupply_init(di, name);
- if (retval)
- goto batt_failed;
-
- i2c_set_clientdata(client, di);
-
- return 0;
-
-batt_failed:
- mutex_lock(&battery_mutex);
- idr_remove(&battery_id, num);
- mutex_unlock(&battery_mutex);
-
- return retval;
-}
-
-static int bq27x00_battery_remove(struct i2c_client *client)
-{
- struct bq27x00_device_info *di = i2c_get_clientdata(client);
-
- bq27x00_powersupply_unregister(di);
-
- mutex_lock(&battery_mutex);
- idr_remove(&battery_id, di->id);
- mutex_unlock(&battery_mutex);
-
- return 0;
-}
-
-static const struct i2c_device_id bq27x00_id[] = {
- { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
- { "bq27500", BQ27500 },
- { "bq27425", BQ27425 },
- { "bq27742", BQ27742 },
- { "bq27510", BQ27510 },
- {},
-};
-MODULE_DEVICE_TABLE(i2c, bq27x00_id);
-
-static struct i2c_driver bq27x00_battery_driver = {
- .driver = {
- .name = "bq27x00-battery",
- },
- .probe = bq27x00_battery_probe,
- .remove = bq27x00_battery_remove,
- .id_table = bq27x00_id,
-};
-
-static inline int bq27x00_battery_i2c_init(void)
-{
- int ret = i2c_add_driver(&bq27x00_battery_driver);
-
- if (ret)
- pr_err("Unable to register BQ27x00 i2c driver\n");
-
- return ret;
-}
-
-static inline void bq27x00_battery_i2c_exit(void)
-{
- i2c_del_driver(&bq27x00_battery_driver);
-}
-
-#else
-
-static inline int bq27x00_battery_i2c_init(void) { return 0; }
-static inline void bq27x00_battery_i2c_exit(void) {};
-
-#endif
-
-/* platform specific code */
-#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
-
-static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg,
- bool single)
-{
- struct device *dev = di->dev;
- struct bq27000_platform_data *pdata = dev->platform_data;
- unsigned int timeout = 3;
- int upper, lower;
- int temp;
-
- if (!single) {
- /* Make sure the value has not changed in between reading the
- * lower and the upper part */
- upper = pdata->read(dev, reg + 1);
- do {
- temp = upper;
- if (upper < 0)
- return upper;
-
- lower = pdata->read(dev, reg);
- if (lower < 0)
- return lower;
-
- upper = pdata->read(dev, reg + 1);
- } while (temp != upper && --timeout);
-
- if (timeout == 0)
- return -EIO;
-
- return (upper << 8) | lower;
- }
-
- return pdata->read(dev, reg);
-}
-
-static int bq27000_battery_probe(struct platform_device *pdev)
-{
- struct bq27x00_device_info *di;
- struct bq27000_platform_data *pdata = pdev->dev.platform_data;
- const char *name;
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform_data supplied\n");
- return -EINVAL;
- }
-
- if (!pdata->read) {
- dev_err(&pdev->dev, "no hdq read callback supplied\n");
- return -EINVAL;
- }
-
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, di);
-
- di->dev = &pdev->dev;
- di->chip = BQ27000;
-
- name = pdata->name ?: dev_name(&pdev->dev);
- di->bus.read = &bq27000_read_platform;
-
- return bq27x00_powersupply_init(di, name);
-}
-
-static int bq27000_battery_remove(struct platform_device *pdev)
-{
- struct bq27x00_device_info *di = platform_get_drvdata(pdev);
-
- bq27x00_powersupply_unregister(di);
-
- return 0;
-}
-
-static struct platform_driver bq27000_battery_driver = {
- .probe = bq27000_battery_probe,
- .remove = bq27000_battery_remove,
- .driver = {
- .name = "bq27000-battery",
- },
-};
-
-static inline int bq27x00_battery_platform_init(void)
-{
- int ret = platform_driver_register(&bq27000_battery_driver);
-
- if (ret)
- pr_err("Unable to register BQ27000 platform driver\n");
-
- return ret;
-}
-
-static inline void bq27x00_battery_platform_exit(void)
-{
- platform_driver_unregister(&bq27000_battery_driver);
-}
-
-#else
-
-static inline int bq27x00_battery_platform_init(void) { return 0; }
-static inline void bq27x00_battery_platform_exit(void) {};
-
-#endif
-
-/*
- * Module stuff
- */
-
-static int __init bq27x00_battery_init(void)
-{
- int ret;
-
- ret = bq27x00_battery_i2c_init();
- if (ret)
- return ret;
-
- ret = bq27x00_battery_platform_init();
- if (ret)
- bq27x00_battery_i2c_exit();
-
- return ret;
-}
-module_init(bq27x00_battery_init);
-
-static void __exit bq27x00_battery_exit(void)
-{
- bq27x00_battery_platform_exit();
- bq27x00_battery_i2c_exit();
-}
-module_exit(bq27x00_battery_exit);
-
-#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
-MODULE_ALIAS("platform:bq27000-battery");
-#endif
-
-#ifdef CONFIG_BATTERY_BQ27X00_I2C
-MODULE_ALIAS("i2c:bq27000-battery");
-#endif
-
-MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
-MODULE_DESCRIPTION("BQ27x00 battery monitor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
new file mode 100644
index 000000000000..880233ce9343
--- /dev/null
+++ b/drivers/power/bq27xxx_battery.c
@@ -0,0 +1,1375 @@
+/*
+ * BQ27xxx battery driver
+ *
+ * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
+ * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Datasheets:
+ * http://www.ti.com/product/bq27000
+ * http://www.ti.com/product/bq27200
+ * http://www.ti.com/product/bq27010
+ * http://www.ti.com/product/bq27210
+ * http://www.ti.com/product/bq27500
+ * http://www.ti.com/product/bq27510-g3
+ * http://www.ti.com/product/bq27520-g4
+ * http://www.ti.com/product/bq27530-g1
+ * http://www.ti.com/product/bq27531-g1
+ * http://www.ti.com/product/bq27541-g1
+ * http://www.ti.com/product/bq27542-g1
+ * http://www.ti.com/product/bq27546-g1
+ * http://www.ti.com/product/bq27742-g1
+ * http://www.ti.com/product/bq27545-g1
+ * http://www.ti.com/product/bq27421-g1
+ * http://www.ti.com/product/bq27425-g1
+ * http://www.ti.com/product/bq27411-g1
+ * http://www.ti.com/product/bq27621-g1
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <asm/unaligned.h>
+
+#include <linux/power/bq27xxx_battery.h>
+
+#define DRIVER_VERSION "1.2.0"
+
+#define BQ27XXX_MANUFACTURER "Texas Instruments"
+
+/* BQ27XXX Flags */
+#define BQ27XXX_FLAG_DSC BIT(0)
+#define BQ27XXX_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
+#define BQ27XXX_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
+#define BQ27XXX_FLAG_FC BIT(9)
+#define BQ27XXX_FLAG_OTD BIT(14)
+#define BQ27XXX_FLAG_OTC BIT(15)
+#define BQ27XXX_FLAG_UT BIT(14)
+#define BQ27XXX_FLAG_OT BIT(15)
+
+/* BQ27000 has a different layout for the Flags register */
+#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */
+#define BQ27000_FLAG_FC BIT(5)
+#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */
+
+#define BQ27XXX_RS (20) /* Resistor sense mOhm */
+#define BQ27XXX_POWER_CONSTANT (29200) /* 29.2 µV^2 * 1000 */
+#define BQ27XXX_CURRENT_CONSTANT (3570) /* 3.57 µV * 1000 */
+
+struct bq27xxx_device_info;
+struct bq27xxx_access_methods {
+ int (*read)(struct bq27xxx_device_info *di, u8 reg, bool single);
+};
+
+#define INVALID_REG_ADDR 0xff
+
+/*
+ * bq27xxx_reg_index - Register names
+ *
+ * These are indexes into a device's register mapping array.
+ */
+enum bq27xxx_reg_index {
+ BQ27XXX_REG_CTRL = 0, /* Control */
+ BQ27XXX_REG_TEMP, /* Temperature */
+ BQ27XXX_REG_INT_TEMP, /* Internal Temperature */
+ BQ27XXX_REG_VOLT, /* Voltage */
+ BQ27XXX_REG_AI, /* Average Current */
+ BQ27XXX_REG_FLAGS, /* Flags */
+ BQ27XXX_REG_TTE, /* Time-to-Empty */
+ BQ27XXX_REG_TTF, /* Time-to-Full */
+ BQ27XXX_REG_TTES, /* Time-to-Empty Standby */
+ BQ27XXX_REG_TTECP, /* Time-to-Empty at Constant Power */
+ BQ27XXX_REG_NAC, /* Nominal Available Capacity */
+ BQ27XXX_REG_FCC, /* Full Charge Capacity */
+ BQ27XXX_REG_CYCT, /* Cycle Count */
+ BQ27XXX_REG_AE, /* Available Energy */
+ BQ27XXX_REG_SOC, /* State-of-Charge */
+ BQ27XXX_REG_DCAP, /* Design Capacity */
+ BQ27XXX_REG_AP, /* Average Power */
+};
+
+struct bq27xxx_reg_cache {
+ int temperature;
+ int time_to_empty;
+ int time_to_empty_avg;
+ int time_to_full;
+ int charge_full;
+ int cycle_count;
+ int capacity;
+ int energy;
+ int flags;
+ int power_avg;
+ int health;
+};
+
+struct bq27xxx_device_info {
+ struct device *dev;
+ int id;
+ enum bq27xxx_chip chip;
+
+ struct bq27xxx_reg_cache cache;
+ int charge_design_full;
+
+ unsigned long last_update;
+ struct delayed_work work;
+
+ struct power_supply *bat;
+
+ struct bq27xxx_access_methods bus;
+
+ struct mutex lock;
+
+ u8 *regs;
+};
+
+/* Register mappings */
+static u8 bq27000_regs[] = {
+ 0x00, /* CONTROL */
+ 0x06, /* TEMP */
+ INVALID_REG_ADDR, /* INT TEMP - NA*/
+ 0x08, /* VOLT */
+ 0x14, /* AVG CURR */
+ 0x0a, /* FLAGS */
+ 0x16, /* TTE */
+ 0x18, /* TTF */
+ 0x1c, /* TTES */
+ 0x26, /* TTECP */
+ 0x0c, /* NAC */
+ 0x12, /* LMD(FCC) */
+ 0x2a, /* CYCT */
+ 0x22, /* AE */
+ 0x0b, /* SOC(RSOC) */
+ 0x76, /* DCAP(ILMD) */
+ 0x24, /* AP */
+};
+
+static u8 bq27010_regs[] = {
+ 0x00, /* CONTROL */
+ 0x06, /* TEMP */
+ INVALID_REG_ADDR, /* INT TEMP - NA*/
+ 0x08, /* VOLT */
+ 0x14, /* AVG CURR */
+ 0x0a, /* FLAGS */
+ 0x16, /* TTE */
+ 0x18, /* TTF */
+ 0x1c, /* TTES */
+ 0x26, /* TTECP */
+ 0x0c, /* NAC */
+ 0x12, /* LMD(FCC) */
+ 0x2a, /* CYCT */
+ INVALID_REG_ADDR, /* AE - NA */
+ 0x0b, /* SOC(RSOC) */
+ 0x76, /* DCAP(ILMD) */
+ INVALID_REG_ADDR, /* AP - NA */
+};
+
+static u8 bq27500_regs[] = {
+ 0x00, /* CONTROL */
+ 0x06, /* TEMP */
+ 0x28, /* INT TEMP */
+ 0x08, /* VOLT */
+ 0x14, /* AVG CURR */
+ 0x0a, /* FLAGS */
+ 0x16, /* TTE */
+ INVALID_REG_ADDR, /* TTF - NA */
+ 0x1a, /* TTES */
+ INVALID_REG_ADDR, /* TTECP - NA */
+ 0x0c, /* NAC */
+ 0x12, /* LMD(FCC) */
+ 0x1e, /* CYCT */
+ INVALID_REG_ADDR, /* AE - NA */
+ 0x20, /* SOC(RSOC) */
+ 0x2e, /* DCAP(ILMD) */
+ INVALID_REG_ADDR, /* AP - NA */
+};
+
+static u8 bq27530_regs[] = {
+ 0x00, /* CONTROL */
+ 0x06, /* TEMP */
+ 0x32, /* INT TEMP */
+ 0x08, /* VOLT */
+ 0x14, /* AVG CURR */
+ 0x0a, /* FLAGS */
+ 0x16, /* TTE */
+ INVALID_REG_ADDR, /* TTF - NA */
+ INVALID_REG_ADDR, /* TTES - NA */
+ INVALID_REG_ADDR, /* TTECP - NA */
+ 0x0c, /* NAC */
+ 0x12, /* LMD(FCC) */
+ 0x2a, /* CYCT */
+ INVALID_REG_ADDR, /* AE - NA */
+ 0x2c, /* SOC(RSOC) */
+ INVALID_REG_ADDR, /* DCAP - NA */
+ 0x24, /* AP */
+};
+
+static u8 bq27541_regs[] = {
+ 0x00, /* CONTROL */
+ 0x06, /* TEMP */
+ 0x28, /* INT TEMP */
+ 0x08, /* VOLT */
+ 0x14, /* AVG CURR */
+ 0x0a, /* FLAGS */
+ 0x16, /* TTE */
+ INVALID_REG_ADDR, /* TTF - NA */
+ INVALID_REG_ADDR, /* TTES - NA */
+ INVALID_REG_ADDR, /* TTECP - NA */
+ 0x0c, /* NAC */
+ 0x12, /* LMD(FCC) */
+ 0x2a, /* CYCT */
+ INVALID_REG_ADDR, /* AE - NA */
+ 0x2c, /* SOC(RSOC) */
+ 0x3c, /* DCAP */
+ 0x76, /* AP */
+};
+
+static u8 bq27545_regs[] = {
+ 0x00, /* CONTROL */
+ 0x06, /* TEMP */
+ 0x28, /* INT TEMP */
+ 0x08, /* VOLT */
+ 0x14, /* AVG CURR */
+ 0x0a, /* FLAGS */
+ 0x16, /* TTE */
+ INVALID_REG_ADDR, /* TTF - NA */
+ INVALID_REG_ADDR, /* TTES - NA */
+ INVALID_REG_ADDR, /* TTECP - NA */
+ 0x0c, /* NAC */
+ 0x12, /* LMD(FCC) */
+ 0x2a, /* CYCT */
+ INVALID_REG_ADDR, /* AE - NA */
+ 0x2c, /* SOC(RSOC) */
+ INVALID_REG_ADDR, /* DCAP - NA */
+ 0x24, /* AP */
+};
+
+static u8 bq27421_regs[] = {
+ 0x00, /* CONTROL */
+ 0x02, /* TEMP */
+ 0x1e, /* INT TEMP */
+ 0x04, /* VOLT */
+ 0x10, /* AVG CURR */
+ 0x06, /* FLAGS */
+ INVALID_REG_ADDR, /* TTE - NA */
+ INVALID_REG_ADDR, /* TTF - NA */
+ INVALID_REG_ADDR, /* TTES - NA */
+ INVALID_REG_ADDR, /* TTECP - NA */
+ 0x08, /* NAC */
+ 0x0e, /* FCC */
+ INVALID_REG_ADDR, /* CYCT - NA */
+ INVALID_REG_ADDR, /* AE - NA */
+ 0x1c, /* SOC */
+ 0x3c, /* DCAP */
+ 0x18, /* AP */
+};
+
+static u8 *bq27xxx_regs[] = {
+ [BQ27000] = bq27000_regs,
+ [BQ27010] = bq27010_regs,
+ [BQ27500] = bq27500_regs,
+ [BQ27530] = bq27530_regs,
+ [BQ27541] = bq27541_regs,
+ [BQ27545] = bq27545_regs,
+ [BQ27421] = bq27421_regs,
+};
+
+static enum power_supply_property bq27000_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27010_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27500_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27530_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27541_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27545_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27421_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+#define BQ27XXX_PROP(_id, _prop) \
+ [_id] = { \
+ .props = _prop, \
+ .size = ARRAY_SIZE(_prop), \
+ }
+
+static struct {
+ enum power_supply_property *props;
+ size_t size;
+} bq27xxx_battery_props[] = {
+ BQ27XXX_PROP(BQ27000, bq27000_battery_props),
+ BQ27XXX_PROP(BQ27010, bq27010_battery_props),
+ BQ27XXX_PROP(BQ27500, bq27500_battery_props),
+ BQ27XXX_PROP(BQ27530, bq27530_battery_props),
+ BQ27XXX_PROP(BQ27541, bq27541_battery_props),
+ BQ27XXX_PROP(BQ27545, bq27545_battery_props),
+ BQ27XXX_PROP(BQ27421, bq27421_battery_props),
+};
+
+static unsigned int poll_interval = 360;
+module_param(poll_interval, uint, 0644);
+MODULE_PARM_DESC(poll_interval,
+ "battery poll interval in seconds - 0 disables polling");
+
+/*
+ * Common code for BQ27xxx devices
+ */
+
+static inline int bq27xxx_read(struct bq27xxx_device_info *di, int reg_index,
+ bool single)
+{
+ /* Returns -EINVAL for invalid or missing registers */
+ if (!di || di->regs[reg_index] == INVALID_REG_ADDR)
+ return -EINVAL;
+
+ return di->bus.read(di, di->regs[reg_index], single);
+}
+
+/*
+ * Return the battery State-of-Charge
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_soc(struct bq27xxx_device_info *di)
+{
+ int soc;
+
+ soc = bq27xxx_read(di, BQ27XXX_REG_SOC, false);
+
+ if (soc < 0)
+ dev_dbg(di->dev, "error reading State-of-Charge\n");
+
+ return soc;
+}
+
+/*
+ * Return a battery charge value in µAh
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg)
+{
+ int charge;
+
+ charge = bq27xxx_read(di, reg, false);
+ if (charge < 0) {
+ dev_dbg(di->dev, "error reading charge register %02x: %d\n",
+ reg, charge);
+ return charge;
+ }
+
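+ /*
+ * bq27000/bq27010 report charge in units of 3.57 µVh measured across the
+ * sense resistor (BQ27XXX_RS mOhm); newer gauges report mAh directly.
+ * Both are converted to µAh here.
+ */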
+ if (di->chip == BQ27000 || di->chip == BQ27010)
+ charge *= BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
+ else
+ charge *= 1000;
+
+ return charge;
+}
+
+/*
+ * Return the battery Nominal available capacity in µAh
+ * Or < 0 if something fails.
+ */
+static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di)
+{
+ int flags;
+
+ if (di->chip == BQ27000 || di->chip == BQ27010) {
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true);
+ if (flags >= 0 && (flags & BQ27000_FLAG_CI))
+ return -ENODATA;
+ }
+
+ return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC);
+}
+
+/*
+ * Return the battery Full Charge Capacity in µAh
+ * Or < 0 if something fails.
+ */
+static inline int bq27xxx_battery_read_fcc(struct bq27xxx_device_info *di)
+{
+ return bq27xxx_battery_read_charge(di, BQ27XXX_REG_FCC);
+}
+
+/*
+ * Return the Design Capacity in µAh
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di)
+{
+ int dcap;
+
+ dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, false);
+
+ if (dcap < 0) {
+ dev_dbg(di->dev, "error reading initial last measured discharge\n");
+ return dcap;
+ }
+
+ if (di->chip == BQ27000 || di->chip == BQ27010)
+ dcap *= BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
+ else
+ dcap *= 1000;
+
+ return dcap;
+}
+
+/*
+ * Return the battery Available energy in µWh
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_energy(struct bq27xxx_device_info *di)
+{
+ int ae;
+
+ ae = bq27xxx_read(di, BQ27XXX_REG_AE, false);
+ if (ae < 0) {
+ dev_dbg(di->dev, "error reading available energy\n");
+ return ae;
+ }
+
+ if (di->chip == BQ27000 || di->chip == BQ27010)
+ ae *= BQ27XXX_POWER_CONSTANT / BQ27XXX_RS;
+ else
+ ae *= 1000;
+
+ return ae;
+}
+
+/*
+ * Return the battery temperature in tenths of degree Kelvin
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_temperature(struct bq27xxx_device_info *di)
+{
+ int temp;
+
+ temp = bq27xxx_read(di, BQ27XXX_REG_TEMP, false);
+ if (temp < 0) {
+ dev_err(di->dev, "error reading temperature\n");
+ return temp;
+ }
+
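+ /* bq27000/bq27010 report temperature in 0.25 K steps; scale to 0.1 K */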
+ if (di->chip == BQ27000 || di->chip == BQ27010)
+ temp = 5 * temp / 2;
+
+ return temp;
+}
+
+/*
+ * Return the battery Cycle count total
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_cyct(struct bq27xxx_device_info *di)
+{
+ int cyct;
+
+ cyct = bq27xxx_read(di, BQ27XXX_REG_CYCT, false);
+ if (cyct < 0)
+ dev_err(di->dev, "error reading cycle count total\n");
+
+ return cyct;
+}
+
+/*
+ * Read a time register.
+ * Return < 0 if something fails.
+ */
+static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
+{
+ int tval;
+
+ tval = bq27xxx_read(di, reg, false);
+ if (tval < 0) {
+ dev_dbg(di->dev, "error reading time register %02x: %d\n",
+ reg, tval);
+ return tval;
+ }
+
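+ /* 65535 means the gauge has no valid time estimate */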
+ if (tval == 65535)
+ return -ENODATA;
+
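+ /* the time registers count minutes; the power-supply class expects seconds */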
+ return tval * 60;
+}
+
+/*
+ * Read an average power register.
+ * Return < 0 if something fails.
+ */
+static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
+{
+ int tval;
+
+ tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
+ if (tval < 0) {
+ dev_err(di->dev, "error reading average power register %02x: %d\n",
+ BQ27XXX_REG_AP, tval);
+ return tval;
+ }
+
+ if (di->chip == BQ27000 || di->chip == BQ27010)
+ return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
+ else
+ return tval;
+}
+
+/*
+ * Returns true if a battery over temperature condition is detected
+ */
+static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
+{
+ if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
+ return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
+ if (di->chip == BQ27530 || di->chip == BQ27421)
+ return flags & BQ27XXX_FLAG_OT;
+
+ return false;
+}
+
+/*
+ * Returns true if a battery under temperature condition is detected
+ */
+static bool bq27xxx_battery_undertemp(struct bq27xxx_device_info *di, u16 flags)
+{
+ if (di->chip == BQ27530 || di->chip == BQ27421)
+ return flags & BQ27XXX_FLAG_UT;
+
+ return false;
+}
+
+/*
+ * Returns true if a low state of charge condition is detected
+ */
+static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags)
+{
+ if (di->chip == BQ27000 || di->chip == BQ27010)
+ return flags & (BQ27000_FLAG_EDV1 | BQ27000_FLAG_EDVF);
+ else
+ return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF);
+}
+
+/*
+ * Read the flags register and derive the battery health state.
+ * Return < 0 if something fails.
+ */
+static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
+{
+ int flags;
+
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, false);
+ if (flags < 0) {
+ dev_err(di->dev, "error reading flag register:%d\n", flags);
+ return flags;
+ }
+
+ /* Unlikely but important to return first */
+ if (unlikely(bq27xxx_battery_overtemp(di, flags)))
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ if (unlikely(bq27xxx_battery_undertemp(di, flags)))
+ return POWER_SUPPLY_HEALTH_COLD;
+ if (unlikely(bq27xxx_battery_dead(di, flags)))
+ return POWER_SUPPLY_HEALTH_DEAD;
+
+ return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+{
+ struct bq27xxx_reg_cache cache = {0, };
+ bool has_ci_flag = di->chip == BQ27000 || di->chip == BQ27010;
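+ /* bq27000/bq27010 expose a single-byte flags register */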
+ bool has_single_flag = di->chip == BQ27000 || di->chip == BQ27010;
+
+ cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_single_flag);
+ if ((cache.flags & 0xff) == 0xff)
+ cache.flags = -1; /* read error */
+ if (cache.flags >= 0) {
+ cache.temperature = bq27xxx_battery_read_temperature(di);
+ if (has_ci_flag && (cache.flags & BQ27000_FLAG_CI)) {
+ dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
+ cache.capacity = -ENODATA;
+ cache.energy = -ENODATA;
+ cache.time_to_empty = -ENODATA;
+ cache.time_to_empty_avg = -ENODATA;
+ cache.time_to_full = -ENODATA;
+ cache.charge_full = -ENODATA;
+ cache.health = -ENODATA;
+ } else {
+ if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+ cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+ if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+ cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+ if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+ cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+ cache.charge_full = bq27xxx_battery_read_fcc(di);
+ cache.capacity = bq27xxx_battery_read_soc(di);
+ if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+ cache.energy = bq27xxx_battery_read_energy(di);
+ cache.health = bq27xxx_battery_read_health(di);
+ }
+ if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+ cache.cycle_count = bq27xxx_battery_read_cyct(di);
+ if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
+ cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
+
+ /* We only have to read charge design full once */
+ if (di->charge_design_full <= 0)
+ di->charge_design_full = bq27xxx_battery_read_dcap(di);
+ }
+
+ if (di->cache.capacity != cache.capacity)
+ power_supply_changed(di->bat);
+
+ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+ di->cache = cache;
+
+ di->last_update = jiffies;
+}
+
+static void bq27xxx_battery_poll(struct work_struct *work)
+{
+ struct bq27xxx_device_info *di =
+ container_of(work, struct bq27xxx_device_info,
+ work.work);
+
+ bq27xxx_battery_update(di);
+
+ if (poll_interval > 0) {
+ /* The timer does not have to be accurate. */
+ set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+ schedule_delayed_work(&di->work, poll_interval * HZ);
+ }
+}
+
+/*
+ * Return the battery average current in µA via val->intval; note that the
+ * current can be negative.
+ * Return 0 on success, or < 0 if something fails.
+ */
+static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int curr;
+ int flags;
+
+ curr = bq27xxx_read(di, BQ27XXX_REG_AI, false);
+ if (curr < 0) {
+ dev_err(di->dev, "error reading current\n");
+ return curr;
+ }
+
+ if (di->chip == BQ27000 || di->chip == BQ27010) {
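+ /* Older gauges report an unsigned magnitude; the CHGS flag gives the sign */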
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, false);
+ if (flags & BQ27000_FLAG_CHGS) {
+ dev_dbg(di->dev, "negative current!\n");
+ curr = -curr;
+ }
+
+ val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
+ } else {
+ /* Other gauges return signed value */
+ val->intval = (int)((s16)curr) * 1000;
+ }
+
+ return 0;
+}
+
+static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int status;
+
+ if (di->chip == BQ27000 || di->chip == BQ27010) {
+ if (di->cache.flags & BQ27000_FLAG_FC)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_CHGS)
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ else if (power_supply_am_i_supplied(di->bat))
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ if (di->cache.flags & BQ27XXX_FLAG_FC)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (di->cache.flags & BQ27XXX_FLAG_DSC)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+
+ val->intval = status;
+
+ return 0;
+}
+
+static int bq27xxx_battery_capacity_level(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int level;
+
+ if (di->chip == BQ27000 || di->chip == BQ27010) {
+ if (di->cache.flags & BQ27000_FLAG_FC)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_EDV1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (di->cache.flags & BQ27000_FLAG_EDVF)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ } else {
+ if (di->cache.flags & BQ27XXX_FLAG_FC)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (di->cache.flags & BQ27XXX_FLAG_SOC1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (di->cache.flags & BQ27XXX_FLAG_SOCF)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ }
+
+ val->intval = level;
+
+ return 0;
+}
+
+/*
+ * Return the battery Voltage in millivolts
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_voltage(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int volt;
+
+ volt = bq27xxx_read(di, BQ27XXX_REG_VOLT, false);
+ if (volt < 0) {
+ dev_err(di->dev, "error reading voltage\n");
+ return volt;
+ }
+
+ val->intval = volt * 1000;
+
+ return 0;
+}
+
+static int bq27xxx_simple_value(int value,
+ union power_supply_propval *val)
+{
+ if (value < 0)
+ return value;
+
+ val->intval = value;
+
+ return 0;
+}
+
+static int bq27xxx_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret = 0;
+ struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
+
+ mutex_lock(&di->lock);
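+ /* Refresh the cached values when the last update is more than 5 seconds old */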
+ if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
+ cancel_delayed_work_sync(&di->work);
+ bq27xxx_battery_poll(&di->work.work);
+ }
+ mutex_unlock(&di->lock);
+
+ if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
+ return -ENODEV;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq27xxx_battery_status(di, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq27xxx_battery_voltage(di, val);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->cache.flags < 0 ? 0 : 1;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = bq27xxx_battery_current(di, val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = bq27xxx_simple_value(di->cache.capacity, val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ ret = bq27xxx_battery_capacity_level(di, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = bq27xxx_simple_value(di->cache.temperature, val);
+ if (ret == 0)
+ val->intval -= 2731; /* convert decidegree Kelvin to Celsius */
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ ret = bq27xxx_simple_value(di->cache.time_to_empty, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ ret = bq27xxx_simple_value(di->cache.time_to_empty_avg, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ ret = bq27xxx_simple_value(di->cache.time_to_full, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = bq27xxx_simple_value(bq27xxx_battery_read_nac(di), val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = bq27xxx_simple_value(di->cache.charge_full, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = bq27xxx_simple_value(di->charge_design_full, val);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ ret = bq27xxx_simple_value(di->cache.cycle_count, val);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ ret = bq27xxx_simple_value(di->cache.energy, val);
+ break;
+ case POWER_SUPPLY_PROP_POWER_AVG:
+ ret = bq27xxx_simple_value(di->cache.power_avg, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq27xxx_simple_value(di->cache.health, val);
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ27XXX_MANUFACTURER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void bq27xxx_external_power_changed(struct power_supply *psy)
+{
+ struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
+
+ cancel_delayed_work_sync(&di->work);
+ schedule_delayed_work(&di->work, 0);
+}
+
+static int bq27xxx_powersupply_init(struct bq27xxx_device_info *di,
+ const char *name)
+{
+ int ret;
+ struct power_supply_desc *psy_desc;
+ struct power_supply_config psy_cfg = { .drv_data = di, };
+
+ psy_desc = devm_kzalloc(di->dev, sizeof(*psy_desc), GFP_KERNEL);
+ if (!psy_desc)
+ return -ENOMEM;
+
+ psy_desc->name = name;
+ psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
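+ /* Pick the property list matching this gauge variant */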
+ psy_desc->properties = bq27xxx_battery_props[di->chip].props;
+ psy_desc->num_properties = bq27xxx_battery_props[di->chip].size;
+ psy_desc->get_property = bq27xxx_battery_get_property;
+ psy_desc->external_power_changed = bq27xxx_external_power_changed;
+
+ INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
+ mutex_init(&di->lock);
+
+ di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
+ if (IS_ERR(di->bat)) {
+ ret = PTR_ERR(di->bat);
+ dev_err(di->dev, "failed to register battery: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+
+ bq27xxx_battery_update(di);
+
+ return 0;
+}
+
+static void bq27xxx_powersupply_unregister(struct bq27xxx_device_info *di)
+{
+ /*
+ * power_supply_unregister() calls bq27xxx_battery_get_property(), which
+ * calls bq27xxx_battery_poll().
+ * Make sure that bq27xxx_battery_poll() will not call
+ * schedule_delayed_work() again after unregister (which would cause an OOPS).
+ */
+ poll_interval = 0;
+
+ cancel_delayed_work_sync(&di->work);
+
+ power_supply_unregister(di->bat);
+
+ mutex_destroy(&di->lock);
+}
+
+/* i2c specific code */
+#ifdef CONFIG_BATTERY_BQ27XXX_I2C
+
+/* If the system has several batteries we need a different name for each
+ * of them...
+ */
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
+static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
+{
+ struct bq27xxx_device_info *di = data;
+
+ bq27xxx_battery_update(di);
+
+ return IRQ_HANDLED;
+}
+
+static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
+ bool single)
+{
+ struct i2c_client *client = to_i2c_client(di->dev);
+ struct i2c_msg msg[2];
+ unsigned char data[2];
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
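+ /* First message writes the register address, second reads back 1 or 2 bytes (LE) */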
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = &reg;
+ msg[0].len = sizeof(reg);
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+ if (single)
+ msg[1].len = 1;
+ else
+ msg[1].len = 2;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0)
+ return ret;
+
+ if (!single)
+ ret = get_unaligned_le16(data);
+ else
+ ret = data[0];
+
+ return ret;
+}
+
+static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ char *name;
+ struct bq27xxx_device_info *di;
+ int num;
+ int retval = 0;
+
+ /* Get new ID for the new battery device */
+ mutex_lock(&battery_mutex);
+ num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
+ mutex_unlock(&battery_mutex);
+ if (num < 0)
+ return num;
+
+ name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+ if (!name) {
+ retval = -ENOMEM;
+ goto batt_failed;
+ }
+
+ di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ retval = -ENOMEM;
+ goto batt_failed;
+ }
+
+ di->id = num;
+ di->dev = &client->dev;
+ di->chip = id->driver_data;
+ di->bus.read = &bq27xxx_battery_i2c_read;
+ di->regs = bq27xxx_regs[di->chip];
+
+ retval = bq27xxx_powersupply_init(di, name);
+ if (retval)
+ goto batt_failed;
+
+ /* Schedule the first poll after about 1 min */
+ schedule_delayed_work(&di->work, 60 * HZ);
+
+ i2c_set_clientdata(client, di);
+
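+ /* If an interrupt line is wired up, refresh the cached values whenever it fires */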
+ if (client->irq) {
+ retval = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, bq27xxx_battery_irq_handler_thread,
+ IRQF_ONESHOT,
+ name, di);
+ if (retval) {
+ dev_err(&client->dev,
+ "Unable to register IRQ %d error %d\n",
+ client->irq, retval);
+ return retval;
+ }
+ }
+
+ return 0;
+
+batt_failed:
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, num);
+ mutex_unlock(&battery_mutex);
+
+ return retval;
+}
+
+static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
+{
+ struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+
+ bq27xxx_powersupply_unregister(di);
+
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, di->id);
+ mutex_unlock(&battery_mutex);
+
+ return 0;
+}
+
+static const struct i2c_device_id bq27xxx_id[] = {
+ { "bq27200", BQ27000 },
+ { "bq27210", BQ27010 },
+ { "bq27500", BQ27500 },
+ { "bq27510", BQ27500 },
+ { "bq27520", BQ27500 },
+ { "bq27530", BQ27530 },
+ { "bq27531", BQ27530 },
+ { "bq27541", BQ27541 },
+ { "bq27542", BQ27541 },
+ { "bq27546", BQ27541 },
+ { "bq27742", BQ27541 },
+ { "bq27545", BQ27545 },
+ { "bq27421", BQ27421 },
+ { "bq27425", BQ27421 },
+ { "bq27441", BQ27421 },
+ { "bq27621", BQ27421 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bq27xxx_id);
+
+static struct i2c_driver bq27xxx_battery_i2c_driver = {
+ .driver = {
+ .name = "bq27xxx-battery",
+ },
+ .probe = bq27xxx_battery_i2c_probe,
+ .remove = bq27xxx_battery_i2c_remove,
+ .id_table = bq27xxx_id,
+};
+
+static inline int bq27xxx_battery_i2c_init(void)
+{
+ int ret = i2c_add_driver(&bq27xxx_battery_i2c_driver);
+
+ if (ret)
+ pr_err("Unable to register BQ27xxx i2c driver\n");
+
+ return ret;
+}
+
+static inline void bq27xxx_battery_i2c_exit(void)
+{
+ i2c_del_driver(&bq27xxx_battery_i2c_driver);
+}
+
+#else
+
+static inline int bq27xxx_battery_i2c_init(void) { return 0; }
+static inline void bq27xxx_battery_i2c_exit(void) {};
+
+#endif
+
+/* platform specific code */
+#ifdef CONFIG_BATTERY_BQ27XXX_PLATFORM
+
+static int bq27xxx_battery_platform_read(struct bq27xxx_device_info *di, u8 reg,
+ bool single)
+{
+ struct device *dev = di->dev;
+ struct bq27xxx_platform_data *pdata = dev->platform_data;
+ unsigned int timeout = 3;
+ int upper, lower;
+ int temp;
+
+ if (!single) {
+ /* Make sure the value has not changed in between reading the
+ * lower and the upper part */
+ upper = pdata->read(dev, reg + 1);
+ do {
+ temp = upper;
+ if (upper < 0)
+ return upper;
+
+ lower = pdata->read(dev, reg);
+ if (lower < 0)
+ return lower;
+
+ upper = pdata->read(dev, reg + 1);
+ } while (temp != upper && --timeout);
+
+ if (timeout == 0)
+ return -EIO;
+
+ return (upper << 8) | lower;
+ }
+
+ return pdata->read(dev, reg);
+}
+
+static int bq27xxx_battery_platform_probe(struct platform_device *pdev)
+{
+ struct bq27xxx_device_info *di;
+ struct bq27xxx_platform_data *pdata = pdev->dev.platform_data;
+ const char *name;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform_data supplied\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->read) {
+ dev_err(&pdev->dev, "no hdq read callback supplied\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->chip) {
+ dev_err(&pdev->dev, "no device supplied\n");
+ return -EINVAL;
+ }
+
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, di);
+
+ di->dev = &pdev->dev;
+ di->chip = pdata->chip;
+ di->regs = bq27xxx_regs[di->chip];
+
+ name = pdata->name ?: dev_name(&pdev->dev);
+ di->bus.read = &bq27xxx_battery_platform_read;
+
+ return bq27xxx_powersupply_init(di, name);
+}
+
+static int bq27xxx_battery_platform_remove(struct platform_device *pdev)
+{
+ struct bq27xxx_device_info *di = platform_get_drvdata(pdev);
+
+ bq27xxx_powersupply_unregister(di);
+
+ return 0;
+}
+
+static struct platform_driver bq27xxx_battery_platform_driver = {
+ .probe = bq27xxx_battery_platform_probe,
+ .remove = bq27xxx_battery_platform_remove,
+ .driver = {
+ .name = "bq27000-battery",
+ },
+};
+
+static inline int bq27xxx_battery_platform_init(void)
+{
+ int ret = platform_driver_register(&bq27xxx_battery_platform_driver);
+
+ if (ret)
+ pr_err("Unable to register BQ27xxx platform driver\n");
+
+ return ret;
+}
+
+static inline void bq27xxx_battery_platform_exit(void)
+{
+ platform_driver_unregister(&bq27xxx_battery_platform_driver);
+}
+
+#else
+
+static inline int bq27xxx_battery_platform_init(void) { return 0; }
+static inline void bq27xxx_battery_platform_exit(void) {};
+
+#endif
+
+/*
+ * Module stuff
+ */
+
+static int __init bq27xxx_battery_init(void)
+{
+ int ret;
+
+ ret = bq27xxx_battery_i2c_init();
+ if (ret)
+ return ret;
+
+ ret = bq27xxx_battery_platform_init();
+ if (ret)
+ bq27xxx_battery_i2c_exit();
+
+ return ret;
+}
+module_init(bq27xxx_battery_init);
+
+static void __exit bq27xxx_battery_exit(void)
+{
+ bq27xxx_battery_platform_exit();
+ bq27xxx_battery_i2c_exit();
+}
+module_exit(bq27xxx_battery_exit);
+
+#ifdef CONFIG_BATTERY_BQ27XXX_PLATFORM
+MODULE_ALIAS("platform:bq27000-battery");
+#endif
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("BQ27xxx battery monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 907293e6f2a4..1ea5d1aa268b 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -1581,8 +1581,10 @@ static struct charger_desc *of_cm_parse_desc(struct device *dev)
cables = devm_kzalloc(dev, sizeof(*cables)
* chg_regs->num_cables,
GFP_KERNEL);
- if (!cables)
+ if (!cables) {
+ of_node_put(child);
return ERR_PTR(-ENOMEM);
+ }
chg_regs->cables = cables;
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index 7e741f1d3cd5..042fb3dacb46 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -508,23 +508,23 @@ out:
return param;
}
-static int lp8727_parse_dt(struct device *dev)
+static struct lp8727_platform_data *lp8727_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
struct device_node *child;
struct lp8727_platform_data *pdata;
const char *type;
- /* If charging parameter is not defined, just skip parsing the dt */
- if (of_get_child_count(np) == 0)
- goto out;
-
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
of_property_read_u32(np, "debounce-ms", &pdata->debounce_msec);
+ /* If charging parameter is not defined, just skip parsing the dt */
+ if (of_get_child_count(np) == 0)
+ return pdata;
+
for_each_child_of_node(np, child) {
of_property_read_string(child, "charger-type", &type);
@@ -535,29 +535,30 @@ static int lp8727_parse_dt(struct device *dev)
pdata->usb = lp8727_parse_charge_pdata(dev, child);
}
- dev->platform_data = pdata;
-out:
- return 0;
+ return pdata;
}
#else
-static int lp8727_parse_dt(struct device *dev)
+static struct lp8727_platform_data *lp8727_parse_dt(struct device *dev)
{
- return 0;
+ return NULL;
}
#endif
static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct lp8727_chg *pchg;
+ struct lp8727_platform_data *pdata;
int ret;
if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
return -EIO;
if (cl->dev.of_node) {
- ret = lp8727_parse_dt(&cl->dev);
- if (ret)
- return ret;
+ pdata = lp8727_parse_dt(&cl->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ } else {
+ pdata = dev_get_platdata(&cl->dev);
}
pchg = devm_kzalloc(&cl->dev, sizeof(*pchg), GFP_KERNEL);
@@ -566,7 +567,7 @@ static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
pchg->client = cl;
pchg->dev = &cl->dev;
- pchg->pdata = cl->dev.platform_data;
+ pchg->pdata = pdata;
i2c_set_clientdata(cl, pchg);
mutex_init(&pchg->xfer_lock);
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index e89255764745..9c65f134d447 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -909,18 +909,21 @@ static int max17042_probe(struct i2c_client *client,
regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007);
}
- chip->battery = power_supply_register(&client->dev, max17042_desc,
- &psy_cfg);
+ chip->battery = devm_power_supply_register(&client->dev, max17042_desc,
+ &psy_cfg);
if (IS_ERR(chip->battery)) {
dev_err(&client->dev, "failed: power supply register\n");
return PTR_ERR(chip->battery);
}
if (client->irq) {
- ret = request_threaded_irq(client->irq, NULL,
- max17042_thread_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- chip->battery->desc->name, chip);
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ max17042_thread_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ chip->battery->desc->name,
+ chip);
if (!ret) {
regmap_update_bits(chip->regmap, MAX17042_CONFIG,
CONFIG_ALRT_BIT_ENBL,
@@ -944,16 +947,6 @@ static int max17042_probe(struct i2c_client *client,
return 0;
}
-static int max17042_remove(struct i2c_client *client)
-{
- struct max17042_chip *chip = i2c_get_clientdata(client);
-
- if (client->irq)
- free_irq(client->irq, chip);
- power_supply_unregister(chip->battery);
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int max17042_suspend(struct device *dev)
{
@@ -1014,7 +1007,6 @@ static struct i2c_driver max17042_i2c_driver = {
.pm = &max17042_pm_ops,
},
.probe = max17042_probe,
- .remove = max17042_remove,
.id_table = max17042_id,
};
module_i2c_driver(max17042_i2c_driver);
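Note: because max17042 now uses devm_power_supply_register() and devm_request_threaded_irq(), both resources are released automatically on unbind and the whole remove() callback could be deleted. A minimal sketch of a probe that leaves cleanup entirely to devm; foo_desc and foo_irq_thread are hypothetical and not defined here:

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/interrupt.h>
	#include <linux/power_supply.h>
	#include <linux/slab.h>

	struct foo_chip {
		struct power_supply *psy;
	};

	static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
	{
		struct power_supply_config cfg = { };
		struct foo_chip *chip;
		int ret;

		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		cfg.drv_data = chip;
		chip->psy = devm_power_supply_register(&client->dev, &foo_desc, &cfg);
		if (IS_ERR(chip->psy))
			return PTR_ERR(chip->psy);	/* unregistered automatically */

		ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
						foo_irq_thread, IRQF_ONESHOT,
						"foo", chip);
		if (ret)
			return ret;			/* freed automatically as well */

		return 0;	/* no .remove needed for these resources */
	}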
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index bf2b4b3a7cae..6d39d52040d4 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -201,8 +201,7 @@ static int max8903_probe(struct platform_device *pdev)
if (pdata->dc_valid == false && pdata->usb_valid == false) {
dev_err(dev, "No valid power sources.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
if (pdata->dc_valid) {
@@ -216,8 +215,7 @@ static int max8903_probe(struct platform_device *pdev)
} else {
dev_err(dev, "When DC is wired, DOK and DCM should"
" be wired as well.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
} else {
if (pdata->dcm) {
@@ -225,8 +223,7 @@ static int max8903_probe(struct platform_device *pdev)
gpio_set_value(pdata->dcm, 0);
else {
dev_err(dev, "Invalid pin: dcm.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
}
}
@@ -238,8 +235,7 @@ static int max8903_probe(struct platform_device *pdev)
} else {
dev_err(dev, "When USB is wired, UOK should be wired."
"as well.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
}
@@ -248,32 +244,28 @@ static int max8903_probe(struct platform_device *pdev)
gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
} else {
dev_err(dev, "Invalid pin: cen.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
}
if (pdata->chg) {
if (!gpio_is_valid(pdata->chg)) {
dev_err(dev, "Invalid pin: chg.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
}
if (pdata->flt) {
if (!gpio_is_valid(pdata->flt)) {
dev_err(dev, "Invalid pin: flt.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
}
if (pdata->usus) {
if (!gpio_is_valid(pdata->usus)) {
dev_err(dev, "Invalid pin: usus.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
}
@@ -291,85 +283,56 @@ static int max8903_probe(struct platform_device *pdev)
psy_cfg.drv_data = data;
- data->psy = power_supply_register(dev, &data->psy_desc, &psy_cfg);
+ data->psy = devm_power_supply_register(dev, &data->psy_desc, &psy_cfg);
if (IS_ERR(data->psy)) {
dev_err(dev, "failed: power supply register.\n");
- ret = PTR_ERR(data->psy);
- goto err;
+ return PTR_ERR(data->psy);
}
if (pdata->dc_valid) {
- ret = request_threaded_irq(gpio_to_irq(pdata->dok),
- NULL, max8903_dcin,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "MAX8903 DC IN", data);
+ ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->dok),
+ NULL, max8903_dcin,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "MAX8903 DC IN", data);
if (ret) {
dev_err(dev, "Cannot request irq %d for DC (%d)\n",
gpio_to_irq(pdata->dok), ret);
- goto err_psy;
+ return ret;
}
}
if (pdata->usb_valid) {
- ret = request_threaded_irq(gpio_to_irq(pdata->uok),
- NULL, max8903_usbin,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "MAX8903 USB IN", data);
+ ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->uok),
+ NULL, max8903_usbin,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "MAX8903 USB IN", data);
if (ret) {
dev_err(dev, "Cannot request irq %d for USB (%d)\n",
gpio_to_irq(pdata->uok), ret);
- goto err_dc_irq;
+ return ret;
}
}
if (pdata->flt) {
- ret = request_threaded_irq(gpio_to_irq(pdata->flt),
- NULL, max8903_fault,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "MAX8903 Fault", data);
+ ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->flt),
+ NULL, max8903_fault,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "MAX8903 Fault", data);
if (ret) {
dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
gpio_to_irq(pdata->flt), ret);
- goto err_usb_irq;
+ return ret;
}
}
return 0;
-
-err_usb_irq:
- if (pdata->usb_valid)
- free_irq(gpio_to_irq(pdata->uok), data);
-err_dc_irq:
- if (pdata->dc_valid)
- free_irq(gpio_to_irq(pdata->dok), data);
-err_psy:
- power_supply_unregister(data->psy);
-err:
- return ret;
-}
-
-static int max8903_remove(struct platform_device *pdev)
-{
- struct max8903_data *data = platform_get_drvdata(pdev);
-
- if (data) {
- struct max8903_pdata *pdata = &data->pdata;
-
- if (pdata->flt)
- free_irq(gpio_to_irq(pdata->flt), data);
- if (pdata->usb_valid)
- free_irq(gpio_to_irq(pdata->uok), data);
- if (pdata->dc_valid)
- free_irq(gpio_to_irq(pdata->dok), data);
- power_supply_unregister(data->psy);
- }
-
- return 0;
}
static struct platform_driver max8903_driver = {
.probe = max8903_probe,
- .remove = max8903_remove,
.driver = {
.name = "max8903-charger",
},
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index 47448d4bc6cd..b64cf0f14142 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -117,8 +117,7 @@ static int max8998_battery_probe(struct platform_device *pdev)
"EOC value not set: leave it unchanged.\n");
} else {
dev_err(max8998->dev, "Invalid EOC value\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
/* Setup Charge Restart Level */
@@ -141,8 +140,7 @@ static int max8998_battery_probe(struct platform_device *pdev)
break;
default:
dev_err(max8998->dev, "Invalid Restart Level\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
/* Setup Charge Full Timeout */
@@ -165,34 +163,22 @@ static int max8998_battery_probe(struct platform_device *pdev)
break;
default:
dev_err(max8998->dev, "Invalid Full Timeout value\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
psy_cfg.drv_data = max8998;
- max8998->battery = power_supply_register(max8998->dev,
- &max8998_battery_desc,
- &psy_cfg);
+ max8998->battery = devm_power_supply_register(max8998->dev,
+ &max8998_battery_desc,
+ &psy_cfg);
if (IS_ERR(max8998->battery)) {
ret = PTR_ERR(max8998->battery);
dev_err(max8998->dev, "failed: power supply register: %d\n",
ret);
- goto err;
+ return ret;
}
return 0;
-err:
- return ret;
-}
-
-static int max8998_battery_remove(struct platform_device *pdev)
-{
- struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
-
- power_supply_unregister(max8998->battery);
-
- return 0;
}
static const struct platform_device_id max8998_battery_id[] = {
@@ -205,7 +191,6 @@ static struct platform_driver max8998_battery_driver = {
.name = "max8998-battery",
},
.probe = max8998_battery_probe,
- .remove = max8998_battery_remove,
.id_table = max8998_battery_id,
};
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index 3a45cc0c4dce..8f9bd1d0eeb6 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -1264,5 +1264,4 @@ module_exit(pm2xxx_charger_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
-MODULE_ALIAS("i2c:pm2xxx-charger");
MODULE_DESCRIPTION("PM2xxx charger management driver");
diff --git a/drivers/power/qcom_smbb.c b/drivers/power/qcom_smbb.c
new file mode 100644
index 000000000000..5eb1e9e543e2
--- /dev/null
+++ b/drivers/power/qcom_smbb.c
@@ -0,0 +1,951 @@
+/* Copyright (c) 2014, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver is for the multi-block Switch-Mode Battery Charger and Boost
+ * (SMBB) hardware, found in Qualcomm PM8941 PMICs. The charger is an
+ * integrated, single-cell lithium-ion battery charger.
+ *
+ * Sub-components:
+ * - Charger core
+ * - Buck
+ * - DC charge-path
+ * - USB charge-path
+ * - Battery interface
+ * - Boost (not implemented)
+ * - Misc
+ * - HF-Buck
+ */
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SMBB_CHG_VMAX 0x040
+#define SMBB_CHG_VSAFE 0x041
+#define SMBB_CHG_CFG 0x043
+#define SMBB_CHG_IMAX 0x044
+#define SMBB_CHG_ISAFE 0x045
+#define SMBB_CHG_VIN_MIN 0x047
+#define SMBB_CHG_CTRL 0x049
+#define CTRL_EN BIT(7)
+#define SMBB_CHG_VBAT_WEAK 0x052
+#define SMBB_CHG_IBAT_TERM_CHG 0x05b
+#define IBAT_TERM_CHG_IEOC BIT(7)
+#define IBAT_TERM_CHG_IEOC_BMS BIT(7)
+#define IBAT_TERM_CHG_IEOC_CHG 0
+#define SMBB_CHG_VBAT_DET 0x05d
+#define SMBB_CHG_TCHG_MAX_EN 0x060
+#define TCHG_MAX_EN BIT(7)
+#define SMBB_CHG_WDOG_TIME 0x062
+#define SMBB_CHG_WDOG_EN 0x065
+#define WDOG_EN BIT(7)
+
+#define SMBB_BUCK_REG_MODE 0x174
+#define BUCK_REG_MODE BIT(0)
+#define BUCK_REG_MODE_VBAT BIT(0)
+#define BUCK_REG_MODE_VSYS 0
+
+#define SMBB_BAT_PRES_STATUS 0x208
+#define PRES_STATUS_BAT_PRES BIT(7)
+#define SMBB_BAT_TEMP_STATUS 0x209
+#define TEMP_STATUS_OK BIT(7)
+#define TEMP_STATUS_HOT BIT(6)
+#define SMBB_BAT_BTC_CTRL 0x249
+#define BTC_CTRL_COMP_EN BIT(7)
+#define BTC_CTRL_COLD_EXT BIT(1)
+#define BTC_CTRL_HOT_EXT_N BIT(0)
+
+#define SMBB_USB_IMAX 0x344
+#define SMBB_USB_ENUM_TIMER_STOP 0x34e
+#define ENUM_TIMER_STOP BIT(0)
+#define SMBB_USB_SEC_ACCESS 0x3d0
+#define SEC_ACCESS_MAGIC 0xa5
+#define SMBB_USB_REV_BST 0x3ed
+#define REV_BST_CHG_GONE BIT(7)
+
+#define SMBB_DC_IMAX 0x444
+
+#define SMBB_MISC_REV2 0x601
+#define SMBB_MISC_BOOT_DONE 0x642
+#define BOOT_DONE BIT(7)
+
+#define STATUS_USBIN_VALID BIT(0) /* USB connection is valid */
+#define STATUS_DCIN_VALID BIT(1) /* DC connection is valid */
+#define STATUS_BAT_HOT BIT(2) /* Battery temp 1=Hot, 0=Cold */
+#define STATUS_BAT_OK BIT(3) /* Battery temp OK */
+#define STATUS_BAT_PRESENT BIT(4) /* Battery is present */
+#define STATUS_CHG_DONE BIT(5) /* Charge cycle is complete */
+#define STATUS_CHG_TRKL BIT(6) /* Trickle charging */
+#define STATUS_CHG_FAST BIT(7) /* Fast charging */
+#define STATUS_CHG_GONE BIT(8) /* No charger is connected */
+
+enum smbb_attr {
+ ATTR_BAT_ISAFE,
+ ATTR_BAT_IMAX,
+ ATTR_USBIN_IMAX,
+ ATTR_DCIN_IMAX,
+ ATTR_BAT_VSAFE,
+ ATTR_BAT_VMAX,
+ ATTR_BAT_VMIN,
+ ATTR_CHG_VDET,
+ ATTR_VIN_MIN,
+ _ATTR_CNT,
+};
+
+struct smbb_charger {
+ unsigned int revision;
+ unsigned int addr;
+ struct device *dev;
+
+ bool dc_disabled;
+ bool jeita_ext_temp;
+ unsigned long status;
+ struct mutex statlock;
+
+ unsigned int attr[_ATTR_CNT];
+
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ struct power_supply *bat_psy;
+ struct regmap *regmap;
+};
+
+static int smbb_vbat_weak_fn(unsigned int index)
+{
+ return 2100000 + index * 100000;
+}
+
+static int smbb_vin_fn(unsigned int index)
+{
+ if (index > 42)
+ return 5600000 + (index - 43) * 200000;
+ return 3400000 + index * 50000;
+}
+
+static int smbb_vmax_fn(unsigned int index)
+{
+ return 3240000 + index * 10000;
+}
+
+static int smbb_vbat_det_fn(unsigned int index)
+{
+ return 3240000 + index * 20000;
+}
+
+static int smbb_imax_fn(unsigned int index)
+{
+ if (index < 2)
+ return 100000 + index * 50000;
+ return index * 100000;
+}
+
+static int smbb_bat_imax_fn(unsigned int index)
+{
+ return index * 50000;
+}
+
+static unsigned int smbb_hw_lookup(unsigned int val, int (*fn)(unsigned int))
+{
+ unsigned int widx;
+ unsigned int sel;
+
+ for (widx = sel = 0; (*fn)(widx) <= val; ++widx)
+ sel = widx;
+
+ return sel;
+}
+
+static const struct smbb_charger_attr {
+ const char *name;
+ unsigned int reg;
+ unsigned int safe_reg;
+ unsigned int max;
+ unsigned int min;
+ unsigned int fail_ok;
+ int (*hw_fn)(unsigned int);
+} smbb_charger_attrs[] = {
+ [ATTR_BAT_ISAFE] = {
+ .name = "qcom,fast-charge-safe-current",
+ .reg = SMBB_CHG_ISAFE,
+ .max = 3000000,
+ .min = 200000,
+ .hw_fn = smbb_bat_imax_fn,
+ .fail_ok = 1,
+ },
+ [ATTR_BAT_IMAX] = {
+ .name = "qcom,fast-charge-current-limit",
+ .reg = SMBB_CHG_IMAX,
+ .safe_reg = SMBB_CHG_ISAFE,
+ .max = 3000000,
+ .min = 200000,
+ .hw_fn = smbb_bat_imax_fn,
+ },
+ [ATTR_DCIN_IMAX] = {
+ .name = "qcom,dc-current-limit",
+ .reg = SMBB_DC_IMAX,
+ .max = 2500000,
+ .min = 100000,
+ .hw_fn = smbb_imax_fn,
+ },
+ [ATTR_BAT_VSAFE] = {
+ .name = "qcom,fast-charge-safe-voltage",
+ .reg = SMBB_CHG_VSAFE,
+ .max = 5000000,
+ .min = 3240000,
+ .hw_fn = smbb_vmax_fn,
+ .fail_ok = 1,
+ },
+ [ATTR_BAT_VMAX] = {
+ .name = "qcom,fast-charge-high-threshold-voltage",
+ .reg = SMBB_CHG_VMAX,
+ .safe_reg = SMBB_CHG_VSAFE,
+ .max = 5000000,
+ .min = 3240000,
+ .hw_fn = smbb_vmax_fn,
+ },
+ [ATTR_BAT_VMIN] = {
+ .name = "qcom,fast-charge-low-threshold-voltage",
+ .reg = SMBB_CHG_VBAT_WEAK,
+ .max = 3600000,
+ .min = 2100000,
+ .hw_fn = smbb_vbat_weak_fn,
+ },
+ [ATTR_CHG_VDET] = {
+ .name = "qcom,auto-recharge-threshold-voltage",
+ .reg = SMBB_CHG_VBAT_DET,
+ .max = 5000000,
+ .min = 3240000,
+ .hw_fn = smbb_vbat_det_fn,
+ },
+ [ATTR_VIN_MIN] = {
+ .name = "qcom,minimum-input-voltage",
+ .reg = SMBB_CHG_VIN_MIN,
+ .max = 9600000,
+ .min = 4200000,
+ .hw_fn = smbb_vin_fn,
+ },
+ [ATTR_USBIN_IMAX] = {
+ .name = "usb-charge-current-limit",
+ .reg = SMBB_USB_IMAX,
+ .max = 2500000,
+ .min = 100000,
+ .hw_fn = smbb_imax_fn,
+ },
+};
+
+static int smbb_charger_attr_write(struct smbb_charger *chg,
+ enum smbb_attr which, unsigned int val)
+{
+ const struct smbb_charger_attr *prop;
+ unsigned int wval;
+ unsigned int out;
+ int rc;
+
+ prop = &smbb_charger_attrs[which];
+
+ if (val > prop->max || val < prop->min) {
+ dev_err(chg->dev, "value out of range for %s [%u:%u]\n",
+ prop->name, prop->min, prop->max);
+ return -EINVAL;
+ }
+
+ if (prop->safe_reg) {
+ rc = regmap_read(chg->regmap,
+ chg->addr + prop->safe_reg, &wval);
+ if (rc) {
+ dev_err(chg->dev,
+ "unable to read safe value for '%s'\n",
+ prop->name);
+ return rc;
+ }
+
+ wval = prop->hw_fn(wval);
+
+ if (val > wval) {
+ dev_warn(chg->dev,
+ "%s above safe value, clamping at %u\n",
+ prop->name, wval);
+ val = wval;
+ }
+ }
+
+ wval = smbb_hw_lookup(val, prop->hw_fn);
+
+ rc = regmap_write(chg->regmap, chg->addr + prop->reg, wval);
+ if (rc) {
+ dev_err(chg->dev, "unable to update %s", prop->name);
+ return rc;
+ }
+ out = prop->hw_fn(wval);
+ if (out != val) {
+ dev_warn(chg->dev,
+ "%s inaccurate, rounded to %u\n",
+ prop->name, out);
+ }
+
+ dev_dbg(chg->dev, "%s <= %d\n", prop->name, out);
+
+ chg->attr[which] = out;
+
+ return 0;
+}
+
+static int smbb_charger_attr_read(struct smbb_charger *chg,
+ enum smbb_attr which)
+{
+ const struct smbb_charger_attr *prop;
+ unsigned int val;
+ int rc;
+
+ prop = &smbb_charger_attrs[which];
+
+ rc = regmap_read(chg->regmap, chg->addr + prop->reg, &val);
+ if (rc) {
+ dev_err(chg->dev, "failed to read %s\n", prop->name);
+ return rc;
+ }
+ val = prop->hw_fn(val);
+ dev_dbg(chg->dev, "%s => %d\n", prop->name, val);
+
+ chg->attr[which] = val;
+
+ return 0;
+}
+
+static int smbb_charger_attr_parse(struct smbb_charger *chg,
+ enum smbb_attr which)
+{
+ const struct smbb_charger_attr *prop;
+ unsigned int val;
+ int rc;
+
+ prop = &smbb_charger_attrs[which];
+
+ rc = of_property_read_u32(chg->dev->of_node, prop->name, &val);
+ if (rc == 0) {
+ rc = smbb_charger_attr_write(chg, which, val);
+ if (!rc || !prop->fail_ok)
+ return rc;
+ }
+ return smbb_charger_attr_read(chg, which);
+}
+
+static void smbb_set_line_flag(struct smbb_charger *chg, int irq, int flag)
+{
+ bool state;
+ int ret;
+
+ ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (ret < 0) {
+ dev_err(chg->dev, "failed to read irq line\n");
+ return;
+ }
+
+ mutex_lock(&chg->statlock);
+ if (state)
+ chg->status |= flag;
+ else
+ chg->status &= ~flag;
+ mutex_unlock(&chg->statlock);
+
+ dev_dbg(chg->dev, "status = %03lx\n", chg->status);
+}
+
+static irqreturn_t smbb_usb_valid_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_USBIN_VALID);
+ power_supply_changed(chg->usb_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_dc_valid_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_DCIN_VALID);
+ if (!chg->dc_disabled)
+ power_supply_changed(chg->dc_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_bat_temp_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+ unsigned int val;
+ int rc;
+
+ rc = regmap_read(chg->regmap, chg->addr + SMBB_BAT_TEMP_STATUS, &val);
+ if (rc)
+ return IRQ_HANDLED;
+
+ mutex_lock(&chg->statlock);
+ if (val & TEMP_STATUS_OK) {
+ chg->status |= STATUS_BAT_OK;
+ } else {
+ chg->status &= ~STATUS_BAT_OK;
+ if (val & TEMP_STATUS_HOT)
+ chg->status |= STATUS_BAT_HOT;
+ }
+ mutex_unlock(&chg->statlock);
+
+ power_supply_changed(chg->bat_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_bat_present_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_BAT_PRESENT);
+ power_supply_changed(chg->bat_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_chg_done_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_CHG_DONE);
+ power_supply_changed(chg->bat_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_chg_gone_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_CHG_GONE);
+ power_supply_changed(chg->bat_psy);
+ power_supply_changed(chg->usb_psy);
+ if (!chg->dc_disabled)
+ power_supply_changed(chg->dc_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_chg_fast_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_CHG_FAST);
+ power_supply_changed(chg->bat_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smbb_chg_trkl_handler(int irq, void *_data)
+{
+ struct smbb_charger *chg = _data;
+
+ smbb_set_line_flag(chg, irq, STATUS_CHG_TRKL);
+ power_supply_changed(chg->bat_psy);
+
+ return IRQ_HANDLED;
+}
+
+static const struct smbb_irq {
+ const char *name;
+ irqreturn_t (*handler)(int, void *);
+} smbb_charger_irqs[] = {
+ { "chg-done", smbb_chg_done_handler },
+ { "chg-fast", smbb_chg_fast_handler },
+ { "chg-trkl", smbb_chg_trkl_handler },
+ { "bat-temp-ok", smbb_bat_temp_handler },
+ { "bat-present", smbb_bat_present_handler },
+ { "chg-gone", smbb_chg_gone_handler },
+ { "usb-valid", smbb_usb_valid_handler },
+ { "dc-valid", smbb_dc_valid_handler },
+};
+
+static int smbb_usbin_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smbb_charger *chg = power_supply_get_drvdata(psy);
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ mutex_lock(&chg->statlock);
+ val->intval = !(chg->status & STATUS_CHG_GONE) &&
+ (chg->status & STATUS_USBIN_VALID);
+ mutex_unlock(&chg->statlock);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ val->intval = chg->attr[ATTR_USBIN_IMAX];
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ val->intval = 2500000;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smbb_usbin_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smbb_charger *chg = power_supply_get_drvdata(psy);
+ int rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ rc = smbb_charger_attr_write(chg, ATTR_USBIN_IMAX,
+ val->intval);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smbb_dcin_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smbb_charger *chg = power_supply_get_drvdata(psy);
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ mutex_lock(&chg->statlock);
+ val->intval = !(chg->status & STATUS_CHG_GONE) &&
+ (chg->status & STATUS_DCIN_VALID);
+ mutex_unlock(&chg->statlock);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ val->intval = chg->attr[ATTR_DCIN_IMAX];
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ val->intval = 2500000;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smbb_dcin_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smbb_charger *chg = power_supply_get_drvdata(psy);
+ int rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ rc = smbb_charger_attr_write(chg, ATTR_DCIN_IMAX,
+ val->intval);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smbb_charger_writable_property(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT;
+}
+
+static int smbb_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smbb_charger *chg = power_supply_get_drvdata(psy);
+ unsigned long status;
+ int rc = 0;
+
+ mutex_lock(&chg->statlock);
+ status = chg->status;
+ mutex_unlock(&chg->statlock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (status & STATUS_CHG_GONE)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (!(status & (STATUS_DCIN_VALID | STATUS_USBIN_VALID)))
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (status & STATUS_CHG_DONE)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (!(status & STATUS_BAT_OK))
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (status & (STATUS_CHG_FAST | STATUS_CHG_TRKL))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else /* everything is ok for charging, but we are not... */
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (status & STATUS_BAT_OK)
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ else if (status & STATUS_BAT_HOT)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ if (status & STATUS_CHG_FAST)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (status & STATUS_CHG_TRKL)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = !!(status & STATUS_BAT_PRESENT);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chg->attr[ATTR_BAT_IMAX];
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = chg->attr[ATTR_BAT_VMAX];
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ /* this charger is a single-cell lithium-ion battery charger
+ * only. If you hook up some other technology, there will be
+ * fireworks.
+ */
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = 3000000; /* single-cell li-ion low end */
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smbb_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smbb_charger *chg = power_supply_get_drvdata(psy);
+ int rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smbb_charger_attr_write(chg, ATTR_BAT_IMAX, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smbb_charger_attr_write(chg, ATTR_BAT_VMAX, val->intval);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smbb_battery_writable_property(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static enum power_supply_property smbb_charger_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+};
+
+static enum power_supply_property smbb_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
+static const struct reg_off_mask_default {
+ unsigned int offset;
+ unsigned int mask;
+ unsigned int value;
+ unsigned int rev_mask;
+} smbb_charger_setup[] = {
+ /* The bootloader is supposed to set this... make sure anyway. */
+ { SMBB_MISC_BOOT_DONE, BOOT_DONE, BOOT_DONE },
+
+ /* Disable software timer */
+ { SMBB_CHG_TCHG_MAX_EN, TCHG_MAX_EN, 0 },
+
+ /* Clear and disable watchdog */
+ { SMBB_CHG_WDOG_TIME, 0xff, 160 },
+ { SMBB_CHG_WDOG_EN, WDOG_EN, 0 },
+
+ /* Use charger based EoC detection */
+ { SMBB_CHG_IBAT_TERM_CHG, IBAT_TERM_CHG_IEOC, IBAT_TERM_CHG_IEOC_CHG },
+
+ /* Disable GSM PA load adjustment.
+ * The PA signal is incorrectly connected on v2.
+ */
+ { SMBB_CHG_CFG, 0xff, 0x00, BIT(3) },
+
+ /* Use VBAT (not VSYS) to compensate for IR drop during fast charging */
+ { SMBB_BUCK_REG_MODE, BUCK_REG_MODE, BUCK_REG_MODE_VBAT },
+
+ /* Enable battery temperature comparators */
+ { SMBB_BAT_BTC_CTRL, BTC_CTRL_COMP_EN, BTC_CTRL_COMP_EN },
+
+ /* Stop USB enumeration timer */
+ { SMBB_USB_ENUM_TIMER_STOP, ENUM_TIMER_STOP, ENUM_TIMER_STOP },
+
+#if 0 /* FIXME supposedly only to disable hardware ARB termination */
+ { SMBB_USB_SEC_ACCESS, SEC_ACCESS_MAGIC },
+ { SMBB_USB_REV_BST, 0xff, REV_BST_CHG_GONE },
+#endif
+
+ /* Stop USB enumeration timer, again */
+ { SMBB_USB_ENUM_TIMER_STOP, ENUM_TIMER_STOP, ENUM_TIMER_STOP },
+
+ /* Enable charging */
+ { SMBB_CHG_CTRL, CTRL_EN, CTRL_EN },
+};
+
+static char *smbb_bif[] = { "smbb-bif" };
+
+static const struct power_supply_desc bat_psy_desc = {
+ .name = "smbb-bif",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = smbb_battery_properties,
+ .num_properties = ARRAY_SIZE(smbb_battery_properties),
+ .get_property = smbb_battery_get_property,
+ .set_property = smbb_battery_set_property,
+ .property_is_writeable = smbb_battery_writable_property,
+};
+
+static const struct power_supply_desc usb_psy_desc = {
+ .name = "smbb-usbin",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = smbb_charger_properties,
+ .num_properties = ARRAY_SIZE(smbb_charger_properties),
+ .get_property = smbb_usbin_get_property,
+ .set_property = smbb_usbin_set_property,
+ .property_is_writeable = smbb_charger_writable_property,
+};
+
+static const struct power_supply_desc dc_psy_desc = {
+ .name = "smbb-dcin",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = smbb_charger_properties,
+ .num_properties = ARRAY_SIZE(smbb_charger_properties),
+ .get_property = smbb_dcin_get_property,
+ .set_property = smbb_dcin_set_property,
+ .property_is_writeable = smbb_charger_writable_property,
+};
+
+static int smbb_charger_probe(struct platform_device *pdev)
+{
+ struct power_supply_config bat_cfg = {};
+ struct power_supply_config usb_cfg = {};
+ struct power_supply_config dc_cfg = {};
+ struct smbb_charger *chg;
+ int rc, i;
+
+ chg = devm_kzalloc(&pdev->dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ chg->dev = &pdev->dev;
+ mutex_init(&chg->statlock);
+
+ chg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chg->regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &chg->addr);
+ if (rc) {
+ dev_err(&pdev->dev, "missing or invalid 'reg' property\n");
+ return rc;
+ }
+
+ rc = regmap_read(chg->regmap, chg->addr + SMBB_MISC_REV2, &chg->revision);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to read revision\n");
+ return rc;
+ }
+
+ chg->revision += 1;
+ if (chg->revision != 2 && chg->revision != 3) {
+ dev_err(&pdev->dev, "v1 hardware not supported\n");
+ return -ENODEV;
+ }
+ dev_info(&pdev->dev, "Initializing SMBB rev %u", chg->revision);
+
+ chg->dc_disabled = of_property_read_bool(pdev->dev.of_node, "qcom,disable-dc");
+
+ for (i = 0; i < _ATTR_CNT; ++i) {
+ rc = smbb_charger_attr_parse(chg, i);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to parse/apply settings\n");
+ return rc;
+ }
+ }
+
+ bat_cfg.drv_data = chg;
+ bat_cfg.of_node = pdev->dev.of_node;
+ chg->bat_psy = devm_power_supply_register(&pdev->dev,
+ &bat_psy_desc,
+ &bat_cfg);
+ if (IS_ERR(chg->bat_psy)) {
+ dev_err(&pdev->dev, "failed to register battery\n");
+ return PTR_ERR(chg->bat_psy);
+ }
+
+ usb_cfg.drv_data = chg;
+ usb_cfg.supplied_to = smbb_bif;
+ usb_cfg.num_supplicants = ARRAY_SIZE(smbb_bif);
+ chg->usb_psy = devm_power_supply_register(&pdev->dev,
+ &usb_psy_desc,
+ &usb_cfg);
+ if (IS_ERR(chg->usb_psy)) {
+ dev_err(&pdev->dev, "failed to register USB power supply\n");
+ return PTR_ERR(chg->usb_psy);
+ }
+
+ if (!chg->dc_disabled) {
+ dc_cfg.drv_data = chg;
+ dc_cfg.supplied_to = smbb_bif;
+ dc_cfg.num_supplicants = ARRAY_SIZE(smbb_bif);
+ chg->dc_psy = devm_power_supply_register(&pdev->dev,
+ &dc_psy_desc,
+ &dc_cfg);
+ if (IS_ERR(chg->dc_psy)) {
+ dev_err(&pdev->dev, "failed to register DC power supply\n");
+ return PTR_ERR(chg->dc_psy);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smbb_charger_irqs); ++i) {
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, smbb_charger_irqs[i].name);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq '%s'\n",
+ smbb_charger_irqs[i].name);
+ return irq;
+ }
+
+ smbb_charger_irqs[i].handler(irq, chg);
+
+ rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ smbb_charger_irqs[i].handler, IRQF_ONESHOT,
+ smbb_charger_irqs[i].name, chg);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to request irq '%s'\n",
+ smbb_charger_irqs[i].name);
+ return rc;
+ }
+ }
+
+ chg->jeita_ext_temp = of_property_read_bool(pdev->dev.of_node,
+ "qcom,jeita-extended-temp-range");
+
+ /* Set temperature range to [35%:70%] or [25%:80%] accordingly */
+ rc = regmap_update_bits(chg->regmap, chg->addr + SMBB_BAT_BTC_CTRL,
+ BTC_CTRL_COLD_EXT | BTC_CTRL_HOT_EXT_N,
+ chg->jeita_ext_temp ?
+ BTC_CTRL_COLD_EXT :
+ BTC_CTRL_HOT_EXT_N);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "unable to set %s temperature range\n",
+ chg->jeita_ext_temp ? "JEITA extended" : "normal");
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smbb_charger_setup); ++i) {
+ const struct reg_off_mask_default *r = &smbb_charger_setup[i];
+
+ if (r->rev_mask & BIT(chg->revision))
+ continue;
+
+ rc = regmap_update_bits(chg->regmap, chg->addr + r->offset,
+ r->mask, r->value);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "unable to initializing charging, bailing\n");
+ return rc;
+ }
+ }
+
+ platform_set_drvdata(pdev, chg);
+
+ return 0;
+}
+
+static int smbb_charger_remove(struct platform_device *pdev)
+{
+ struct smbb_charger *chg;
+
+ chg = platform_get_drvdata(pdev);
+
+ regmap_update_bits(chg->regmap, chg->addr + SMBB_CHG_CTRL, CTRL_EN, 0);
+
+ return 0;
+}
+
+static const struct of_device_id smbb_charger_id_table[] = {
+ { .compatible = "qcom,pm8941-charger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, smbb_charger_id_table);
+
+static struct platform_driver smbb_charger_driver = {
+ .probe = smbb_charger_probe,
+ .remove = smbb_charger_remove,
+ .driver = {
+ .name = "qcom-smbb",
+ .of_match_table = smbb_charger_id_table,
+ },
+};
+module_platform_driver(smbb_charger_driver);
+
+MODULE_DESCRIPTION("Qualcomm Switch-Mode Battery Charger and Boost driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 5a0189bf19bb..1131cf75acc6 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -15,7 +15,7 @@ config POWER_RESET_AS3722
This driver supports turning off board via a ams AS3722 power-off.
config POWER_RESET_AT91_POWEROFF
- bool "Atmel AT91 poweroff driver"
+ tristate "Atmel AT91 poweroff driver"
depends on ARCH_AT91
default SOC_AT91SAM9 || SOC_SAMA5
help
@@ -23,7 +23,7 @@ config POWER_RESET_AT91_POWEROFF
SoCs
config POWER_RESET_AT91_RESET
- bool "Atmel AT91 reset driver"
+ tristate "Atmel AT91 reset driver"
depends on ARCH_AT91
default SOC_AT91SAM9 || SOC_SAMA5
help
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index 9847cfb7e23d..e9e24df35f26 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -10,6 +10,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -48,6 +49,7 @@ static const char *shdwc_wakeup_modes[] = {
};
static void __iomem *at91_shdwc_base;
+static struct clk *sclk;
static void __init at91_wakeup_status(void)
{
@@ -119,9 +121,10 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
writel(wakeup_mode | mode, at91_shdwc_base + AT91_SHDW_MR);
}
-static int at91_poweroff_probe(struct platform_device *pdev)
+static int __init at91_poweroff_probe(struct platform_device *pdev)
{
struct resource *res;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
@@ -130,6 +133,16 @@ static int at91_poweroff_probe(struct platform_device *pdev)
return PTR_ERR(at91_shdwc_base);
}
+ sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sclk))
+ return PTR_ERR(sclk);
+
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable slow clock\n");
+ return ret;
+ }
+
at91_wakeup_status();
if (pdev->dev.of_node)
@@ -140,6 +153,16 @@ static int at91_poweroff_probe(struct platform_device *pdev)
return 0;
}
+static int __exit at91_poweroff_remove(struct platform_device *pdev)
+{
+ if (pm_power_off == at91_poweroff)
+ pm_power_off = NULL;
+
+ clk_disable_unprepare(sclk);
+
+ return 0;
+}
+
static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9260-shdwc", },
{ .compatible = "atmel,at91sam9rl-shdwc", },
@@ -148,10 +171,14 @@ static const struct of_device_id at91_poweroff_of_match[] = {
};
static struct platform_driver at91_poweroff_driver = {
- .probe = at91_poweroff_probe,
+ .remove = __exit_p(at91_poweroff_remove),
.driver = {
.name = "at91-poweroff",
.of_match_table = at91_poweroff_of_match,
},
};
-module_platform_driver(at91_poweroff_driver);
+module_platform_driver_probe(at91_poweroff_driver, at91_poweroff_probe);
+
+MODULE_AUTHOR("Atmel Corporation");
+MODULE_DESCRIPTION("Shutdown driver for Atmel SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index c378d4ec826f..3f6b5dd7c3d4 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -1,5 +1,5 @@
/*
- * Atmel AT91 SAM9 SoCs reset code
+ * Atmel AT91 SAM9 & SAMA5 SoCs reset code
*
* Copyright (C) 2007 Atmel Corporation.
* Copyright (C) BitBox Ltd 2010
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -46,6 +47,7 @@ enum reset_type {
};
static void __iomem *at91_ramc_base[2], *at91_rstc_base;
+static struct clk *sclk;
/*
* unless the SDRAM is cleanly shutdown before we hit the
@@ -178,11 +180,11 @@ static struct notifier_block at91_restart_nb = {
.priority = 192,
};
-static int at91_reset_of_probe(struct platform_device *pdev)
+static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device_node *np;
- int idx = 0;
+ int ret, idx = 0;
at91_rstc_base = of_iomap(pdev->dev.of_node, 0);
if (!at91_rstc_base) {
@@ -204,53 +206,32 @@ static int at91_reset_of_probe(struct platform_device *pdev)
match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
at91_restart_nb.notifier_call = match->data;
- return register_restart_handler(&at91_restart_nb);
-}
-static int at91_reset_platform_probe(struct platform_device *pdev)
-{
- const struct platform_device_id *match;
- struct resource *res;
- int idx = 0;
+ sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sclk))
+ return PTR_ERR(sclk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_rstc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(at91_rstc_base)) {
- dev_err(&pdev->dev, "Could not map reset controller address\n");
- return PTR_ERR(at91_rstc_base);
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable slow clock\n");
+ return ret;
}
- for (idx = 0; idx < 2; idx++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
- at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!at91_ramc_base[idx]) {
- dev_err(&pdev->dev, "Could not map ram controller address\n");
- return -ENOMEM;
- }
+ ret = register_restart_handler(&at91_restart_nb);
+ if (ret) {
+ clk_disable_unprepare(sclk);
+ return ret;
}
- match = platform_get_device_id(pdev);
- at91_restart_nb.notifier_call =
- (int (*)(struct notifier_block *,
- unsigned long, void *)) match->driver_data;
+ at91_reset_status(pdev);
- return register_restart_handler(&at91_restart_nb);
+ return 0;
}
-static int at91_reset_probe(struct platform_device *pdev)
+static int __exit at91_reset_remove(struct platform_device *pdev)
{
- int ret;
-
- if (pdev->dev.of_node)
- ret = at91_reset_of_probe(pdev);
- else
- ret = at91_reset_platform_probe(pdev);
-
- if (ret)
- return ret;
-
- at91_reset_status(pdev);
+ unregister_restart_handler(&at91_restart_nb);
+ clk_disable_unprepare(sclk);
return 0;
}
@@ -262,11 +243,15 @@ static const struct platform_device_id at91_reset_plat_match[] = {
};
static struct platform_driver at91_reset_driver = {
- .probe = at91_reset_probe,
+ .remove = __exit_p(at91_reset_remove),
.driver = {
.name = "at91-reset",
.of_match_table = at91_reset_of_match,
},
.id_table = at91_reset_plat_match,
};
-module_platform_driver(at91_reset_driver);
+module_platform_driver_probe(at91_reset_driver, at91_reset_probe);
+
+MODULE_AUTHOR("Atmel Corporation");
+MODULE_DESCRIPTION("Reset driver for Atmel SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/rt9455_charger.c b/drivers/power/rt9455_charger.c
index a49a9d44bdda..cfdbde9daf94 100644
--- a/drivers/power/rt9455_charger.c
+++ b/drivers/power/rt9455_charger.c
@@ -1760,5 +1760,4 @@ module_i2c_driver(rt9455_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anda-Maria Nicolae <anda-maria.nicolae@intel.com>");
-MODULE_ALIAS("i2c:rt9455-charger");
MODULE_DESCRIPTION("Richtek RT9455 Charger Driver");
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index 0b60a0b5878b..072c5189bd6d 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -1332,4 +1332,3 @@ MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("SMB347 battery charger driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:smb347");
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c
index 7e8fbd29c30e..1b4b5e09538e 100644
--- a/drivers/power/tps65090-charger.c
+++ b/drivers/power/tps65090-charger.c
@@ -353,6 +353,7 @@ static const struct of_device_id of_tps65090_charger_match[] = {
{ .compatible = "ti,tps65090-charger", },
{ /* end */ }
};
+MODULE_DEVICE_TABLE(of, of_tps65090_charger_match);
static struct platform_driver tps65090_charger_driver = {
.driver = {
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
new file mode 100644
index 000000000000..d9f56730c735
--- /dev/null
+++ b/drivers/power/tps65217_charger.c
@@ -0,0 +1,264 @@
+/*
+ * Battery charger driver for TI's tps65217
+ *
+ * Copyright (c) 2015, Collabora Ltd.
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Battery charger driver for TI's tps65217
+ */
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65217.h>
+
+#define POLL_INTERVAL (HZ * 2)
+
+struct tps65217_charger {
+ struct tps65217 *tps;
+ struct device *dev;
+ struct power_supply *ac;
+
+ int ac_online;
+ int prev_ac_online;
+
+ struct task_struct *poll_task;
+};
+
+static enum power_supply_property tps65217_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int tps65217_config_charger(struct tps65217_charger *charger)
+{
+ int ret;
+
+ dev_dbg(charger->dev, "%s\n", __func__);
+
+ /*
+ * tps65217 rev. G, p. 31 (see p. 32 for NTC schematic)
+ *
+ * The device can be configured to support a 100k NTC (B = 3960) by
+ * setting the NTC_TYPE bit in register CHGCONFIG1 to 1. However, it
+ * is not recommended to do so. In sleep mode, the charger continues
+ * charging the battery, but all register values are reset to default
+ * values. Therefore, the charger would get the wrong temperature
+ * information. If 100k NTC setting is required, please contact the
+ * factory.
+ *
+ * ATTENTION, conflicting information, from p. 46
+ *
+ * NTC TYPE (for battery temperature measurement)
+ * 0 – 100k (curve 1, B = 3960)
+ * 1 – 10k (curve 2, B = 3480) (default on reset)
+ *
+ */
+ ret = tps65217_clear_bits(charger->tps, TPS65217_REG_CHGCONFIG1,
+ TPS65217_CHGCONFIG1_NTC_TYPE,
+ TPS65217_PROTECT_NONE);
+ if (ret) {
+ dev_err(charger->dev,
+ "failed to set 100k NTC setting: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps65217_enable_charging(struct tps65217_charger *charger)
+{
+ int ret;
+
+ /* charger already enabled */
+ if (charger->ac_online)
+ return 0;
+
+ dev_dbg(charger->dev, "%s: enable charging\n", __func__);
+ ret = tps65217_set_bits(charger->tps, TPS65217_REG_CHGCONFIG1,
+ TPS65217_CHGCONFIG1_CHG_EN,
+ TPS65217_CHGCONFIG1_CHG_EN,
+ TPS65217_PROTECT_NONE);
+ if (ret) {
+ dev_err(charger->dev,
+ "%s: Error in writing CHG_EN in reg 0x%x: %d\n",
+ __func__, TPS65217_REG_CHGCONFIG1, ret);
+ return ret;
+ }
+
+ charger->ac_online = 1;
+
+ return 0;
+}
+
+static int tps65217_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct tps65217_charger *charger = power_supply_get_drvdata(psy);
+
+ if (psp == POWER_SUPPLY_PROP_ONLINE) {
+ val->intval = charger->ac_online;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static irqreturn_t tps65217_charger_irq(int irq, void *dev)
+{
+ int ret, val;
+ struct tps65217_charger *charger = dev;
+
+ charger->prev_ac_online = charger->ac_online;
+
+ ret = tps65217_reg_read(charger->tps, TPS65217_REG_STATUS, &val);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s: Error in reading reg 0x%x\n",
+ __func__, TPS65217_REG_STATUS);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(charger->dev, "%s: 0x%x\n", __func__, val);
+
+ /* check for AC status bit */
+ if (val & TPS65217_STATUS_ACPWR) {
+ ret = tps65217_enable_charging(charger);
+ if (ret) {
+ dev_err(charger->dev,
+ "failed to enable charger: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ } else {
+ charger->ac_online = 0;
+ }
+
+ if (charger->prev_ac_online != charger->ac_online)
+ power_supply_changed(charger->ac);
+
+ ret = tps65217_reg_read(charger->tps, TPS65217_REG_CHGCONFIG0, &val);
+ if (ret < 0) {
+ dev_err(charger->dev, "%s: Error in reading reg 0x%x\n",
+ __func__, TPS65217_REG_CHGCONFIG0);
+ return IRQ_HANDLED;
+ }
+
+ if (val & TPS65217_CHGCONFIG0_ACTIVE)
+ dev_dbg(charger->dev, "%s: charger is charging\n", __func__);
+ else
+ dev_dbg(charger->dev,
+ "%s: charger is NOT charging\n", __func__);
+
+ return IRQ_HANDLED;
+}
+
+static int tps65217_charger_poll_task(void *data)
+{
+ set_freezable();
+
+ while (!kthread_should_stop()) {
+ schedule_timeout_interruptible(POLL_INTERVAL);
+ try_to_freeze();
+ tps65217_charger_irq(-1, data);
+ }
+ return 0;
+}
+
+static const struct power_supply_desc tps65217_charger_desc = {
+ .name = "tps65217-ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .get_property = tps65217_ac_get_property,
+ .properties = tps65217_ac_props,
+ .num_properties = ARRAY_SIZE(tps65217_ac_props),
+};
+
+static int tps65217_charger_probe(struct platform_device *pdev)
+{
+ struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct tps65217_charger *charger;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->tps = tps;
+ charger->dev = &pdev->dev;
+
+ charger->ac = devm_power_supply_register(&pdev->dev,
+ &tps65217_charger_desc,
+ NULL);
+ if (IS_ERR(charger->ac)) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ return PTR_ERR(charger->ac);
+ }
+
+ ret = tps65217_config_charger(charger);
+ if (ret < 0) {
+ dev_err(charger->dev, "charger config failed, err %d\n", ret);
+ return ret;
+ }
+
+ charger->poll_task = kthread_run(tps65217_charger_poll_task,
+ charger, "ktps65217charger");
+ if (IS_ERR(charger->poll_task)) {
+ ret = PTR_ERR(charger->poll_task);
+ dev_err(charger->dev, "Unable to run kthread err %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps65217_charger_remove(struct platform_device *pdev)
+{
+ struct tps65217_charger *charger = platform_get_drvdata(pdev);
+
+ kthread_stop(charger->poll_task);
+
+ return 0;
+}
+
+static const struct of_device_id tps65217_charger_match_table[] = {
+ { .compatible = "ti,tps65217-charger", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tps65217_charger_match_table);
+
+static struct platform_driver tps65217_charger_driver = {
+ .probe = tps65217_charger_probe,
+ .remove = tps65217_charger_remove,
+ .driver = {
+ .name = "tps65217-charger",
+ .of_match_table = of_match_ptr(tps65217_charger_match_table),
+ },
+
+};
+module_platform_driver(tps65217_charger_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Enric Balletbo Serra <enric.balletbo@collabora.com>");
+MODULE_DESCRIPTION("TPS65217 battery charger driver");
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 74f2d3ff1d7c..bcd4dc304f27 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -22,7 +22,7 @@
#include <linux/power_supply.h>
#include <linux/notifier.h>
#include <linux/usb/otg.h>
-#include <linux/i2c/twl4030-madc.h>
+#include <linux/iio/consumer.h>
#define TWL4030_BCIMDEN 0x00
#define TWL4030_BCIMDKEY 0x01
@@ -91,21 +91,23 @@
#define TWL4030_MSTATEC_COMPLETE1 0x0b
#define TWL4030_MSTATEC_COMPLETE4 0x0e
-#if IS_REACHABLE(CONFIG_TWL4030_MADC)
/*
* If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
* then AC is available.
*/
-static inline int ac_available(void)
+static inline int ac_available(struct iio_channel *channel_vac)
{
- return twl4030_get_madc_conversion(11) > 4500;
-}
-#else
-static inline int ac_available(void)
-{
- return 0;
+ int val, err;
+
+ if (!channel_vac)
+ return 0;
+
+ err = iio_read_channel_processed(channel_vac, &val);
+ if (err < 0)
+ return 0;
+ return val > 4500;
}
-#endif
+
static bool allow_usb;
module_param(allow_usb, bool, 0644);
MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
@@ -128,6 +130,7 @@ struct twl4030_bci {
*/
unsigned int ichg_eoc, ichg_lo, ichg_hi;
unsigned int usb_cur, ac_cur;
+ struct iio_channel *channel_vac;
bool ac_is_active;
int usb_mode, ac_mode; /* charging mode requested */
#define CHARGE_OFF 0
@@ -278,7 +281,7 @@ static int twl4030_charger_update_current(struct twl4030_bci *bci)
* If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
* and AC is enabled, set current for 'ac'
*/
- if (ac_available()) {
+ if (ac_available(bci->channel_vac)) {
cur = bci->ac_cur;
bci->ac_is_active = true;
} else {
@@ -1048,6 +1051,12 @@ static int twl4030_bci_probe(struct platform_device *pdev)
return ret;
}
+ bci->channel_vac = iio_channel_get(&pdev->dev, "vac");
+ if (IS_ERR(bci->channel_vac)) {
+ bci->channel_vac = NULL;
+ dev_warn(&pdev->dev, "could not request vac iio channel");
+ }
+
INIT_WORK(&bci->work, twl4030_bci_usb_work);
INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker);
@@ -1069,7 +1078,7 @@ static int twl4030_bci_probe(struct platform_device *pdev)
TWL4030_INTERRUPTS_BCIIMR1A);
if (ret < 0) {
dev_err(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
- return ret;
+ goto fail;
}
reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
@@ -1102,6 +1111,10 @@ static int twl4030_bci_probe(struct platform_device *pdev)
twl4030_charger_enable_backup(0, 0);
return 0;
+fail:
+ iio_channel_release(bci->channel_vac);
+
+ return ret;
}
static int __exit twl4030_bci_remove(struct platform_device *pdev)
@@ -1112,6 +1125,8 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev)
twl4030_charger_enable_usb(bci, false);
twl4030_charger_enable_backup(0, 0);
+ iio_channel_release(bci->channel_vac);
+
device_remove_file(&bci->usb->dev, &dev_attr_max_current);
device_remove_file(&bci->usb->dev, &dev_attr_mode);
device_remove_file(&bci->ac->dev, &dev_attr_max_current);
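Note: twl4030_charger now reads the AC supply voltage through the generic IIO consumer API instead of the MADC-specific helper, so any ADC exposing a "vac" channel can back it. A minimal sketch of that consumer pattern (one-shot get/read/release; the driver above instead keeps the channel for the device's lifetime and releases it in remove):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/iio/consumer.h>

	static int foo_read_vac(struct device *dev)
	{
		struct iio_channel *chan;
		int val, err;

		chan = iio_channel_get(dev, "vac");	/* name from io-channel-names in DT */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		err = iio_read_channel_processed(chan, &val);	/* scaled value, mV here */
		iio_channel_release(chan);

		return err < 0 ? err : val;
	}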
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index db11ae6599f3..7082301da945 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -499,7 +499,8 @@ static int wm831x_power_probe(struct platform_device *pdev)
struct wm831x_power *power;
int ret, irq, i;
- power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL);
+ power = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_power),
+ GFP_KERNEL);
if (power == NULL)
return -ENOMEM;
@@ -536,7 +537,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
NULL);
if (IS_ERR(power->wall)) {
ret = PTR_ERR(power->wall);
- goto err_kmalloc;
+ goto err;
}
power->usb_desc.name = power->usb_name,
@@ -572,7 +573,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
- IRQF_TRIGGER_RISING, "System power low",
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
power);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
@@ -582,7 +583,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
- IRQF_TRIGGER_RISING, "Power source",
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
power);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
@@ -595,7 +596,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
platform_get_irq_byname(pdev,
wm831x_bat_irqs[i]));
ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
wm831x_bat_irqs[i],
power);
if (ret != 0) {
@@ -626,8 +627,7 @@ err_usb:
power_supply_unregister(power->usb);
err_wall:
power_supply_unregister(power->wall);
-err_kmalloc:
- kfree(power);
+err:
return ret;
}
@@ -654,7 +654,6 @@ static int wm831x_power_remove(struct platform_device *pdev)
power_supply_unregister(wm831x_power->battery);
power_supply_unregister(wm831x_power->wall);
power_supply_unregister(wm831x_power->usb);
- kfree(wm831x_power);
return 0;
}
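Note: the wm831x hunks add IRQF_ONESHOT to every request_threaded_irq() call that passes a NULL primary handler. The genirq core rejects such a request without it (returning -EINVAL), because the line must stay masked until the threaded handler finishes. Minimal sketch:

	#include <linux/interrupt.h>

	static irqreturn_t foo_thread_fn(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_request(int irq, void *data)
	{
		/* NULL primary handler => IRQF_ONESHOT is mandatory; without it
		 * request_threaded_irq() fails with -EINVAL. */
		return request_threaded_irq(irq, NULL, foo_thread_fn,
					    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					    "foo", data);
	}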
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 64bccff557be..8df0b0e62976 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -627,7 +627,7 @@ config REGULATOR_TI_ABB
config REGULATOR_STW481X_VMMC
bool "ST Microelectronics STW481X VMMC regulator"
- depends on MFD_STW481X
+ depends on MFD_STW481X || COMPILE_TEST
default y if MFD_STW481X
help
This driver supports the internal VMMC regulator in the STw481x
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 896db168e4bd..f8d4cd3d1397 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -261,6 +261,16 @@ static const struct regulator_desc act8865_regulators[] = {
ACT88xx_REG("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
};
+static const struct regulator_desc act8865_alt_regulators[] = {
+ ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET2, "vp1"),
+ ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET2, "vp2"),
+ ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET2, "vp3"),
+ ACT88xx_REG("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
+ ACT88xx_REG("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
+ ACT88xx_REG("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
+ ACT88xx_REG("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
+};
+
#ifdef CONFIG_OF
static const struct of_device_id act8865_dt_ids[] = {
{ .compatible = "active-semi,act8600", .data = (void *)ACT8600 },
@@ -413,6 +423,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
struct act8865 *act8865;
unsigned long type;
int off_reg, off_mask;
+ int voltage_select = 0;
pdata = dev_get_platdata(dev);
@@ -424,6 +435,10 @@ static int act8865_pmic_probe(struct i2c_client *client,
return -ENODEV;
type = (unsigned long) id->data;
+
+ voltage_select = !!of_get_property(dev->of_node,
+ "active-semi,vsel-high",
+ NULL);
} else {
type = i2c_id->driver_data;
}
@@ -442,8 +457,13 @@ static int act8865_pmic_probe(struct i2c_client *client,
off_mask = ACT8846_OFF_SYSMASK;
break;
case ACT8865:
- regulators = act8865_regulators;
- num_regulators = ARRAY_SIZE(act8865_regulators);
+ if (voltage_select) {
+ regulators = act8865_alt_regulators;
+ num_regulators = ARRAY_SIZE(act8865_alt_regulators);
+ } else {
+ regulators = act8865_regulators;
+ num_regulators = ARRAY_SIZE(act8865_regulators);
+ }
off_reg = ACT8865_SYS_CTRL;
off_mask = ACT8865_MSTROFF;
break;
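Note: act8865 now selects an alternate regulator_desc table, pointing at the VSET2 registers, when the DT node carries the boolean "active-semi,vsel-high" property. A minimal sketch of that selection with hypothetical table names; of_property_read_bool() is the idiomatic spelling of the !!of_get_property(..., NULL) test used above:

	#include <linux/of.h>
	#include <linux/regulator/driver.h>

	static const struct regulator_desc *foo_pick_table(struct device_node *np,
					const struct regulator_desc *vset1_tbl,
					const struct regulator_desc *vset2_tbl)
	{
		if (of_property_read_bool(np, "active-semi,vsel-high"))
			return vset2_tbl;	/* VSEL pin tied high: use the VSET2 registers */
		return vset1_tbl;
	}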
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 52ea605f8130..63cd5e68c864 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -30,6 +30,7 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
@@ -199,6 +200,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
rdesc->owner = THIS_MODULE;
initdata = of_get_regulator_init_data(dev, np, rdesc);
+ initdata->supply_regulator = "vin";
sreg->initdata = initdata;
anatop_np = of_get_parent(np);
@@ -262,6 +264,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
rdesc->vsel_reg = sreg->control_reg;
rdesc->vsel_mask = ((1 << sreg->vol_bit_width) - 1) <<
sreg->vol_bit_shift;
+ rdesc->min_dropout_uV = 125000;
config.dev = &pdev->dev;
config.init_data = initdata;
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 5e947a8ddb84..f7c88ff90c43 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -189,13 +190,22 @@ static int arizona_ldo1_of_get_pdata(struct arizona *arizona,
{
struct arizona_pdata *pdata = &arizona->pdata;
struct arizona_ldo1 *ldo1 = config->driver_data;
+ struct device_node *np = arizona->dev->of_node;
struct device_node *init_node, *dcvdd_node;
struct regulator_init_data *init_data;
- pdata->ldoena = arizona_of_get_named_gpio(arizona, "wlf,ldoena", true);
+ pdata->ldoena = of_get_named_gpio(np, "wlf,ldoena", 0);
+ if (pdata->ldoena < 0) {
+ dev_warn(arizona->dev,
+ "LDOENA GPIO property missing/malformed: %d\n",
+ pdata->ldoena);
+ pdata->ldoena = 0;
+ } else {
+ config->ena_gpio_initialized = true;
+ }
- init_node = of_get_child_by_name(arizona->dev->of_node, "ldo1");
- dcvdd_node = of_parse_phandle(arizona->dev->of_node, "DCVDD-supply", 0);
+ init_node = of_get_child_by_name(np, "ldo1");
+ dcvdd_node = of_parse_phandle(np, "DCVDD-supply", 0);
if (init_node) {
config->of_node = init_node;
@@ -245,6 +255,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
switch (arizona->type) {
case WM5102:
case WM8997:
+ case WM8998:
+ case WM1814:
desc = &arizona_ldo1_hc;
ldo1->init_data = arizona_ldo1_dvfs;
break;
@@ -272,8 +284,6 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
ret = arizona_ldo1_of_get_pdata(arizona, &config, desc);
if (ret < 0)
return ret;
-
- config.ena_gpio_initialized = true;
}
}
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index a9567af7cec0..35de22fdb7a0 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -196,10 +196,10 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
/* secondary switchable output of DCDC1 */
- AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
+ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, 1600, 3400, 100,
AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
/* LDO regulator internally chained to DCDC5 */
- AXP_DESC(AXP22X, DC5LDO, "dc5ldo", "dcdc5", 700, 1400, 100,
+ AXP_DESC(AXP22X, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
AXP_DESC(AXP22X, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(6)),
@@ -350,6 +350,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
};
int ret, i, nregulators;
u32 workmode;
+ const char *axp22x_dc1_name = axp22x_regulators[AXP22X_DCDC1].name;
+ const char *axp22x_dc5_name = axp22x_regulators[AXP22X_DCDC5].name;
switch (axp20x->variant) {
case AXP202_ID:
@@ -371,8 +373,37 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
axp20x_regulator_parse_dt(pdev);
for (i = 0; i < nregulators; i++) {
- rdev = devm_regulator_register(&pdev->dev, &regulators[i],
- &config);
+ const struct regulator_desc *desc = &regulators[i];
+ struct regulator_desc *new_desc;
+
+ /*
+ * Regulators DC1SW and DC5LDO are connected internally,
+ * so we have to handle their supply names separately.
+ *
+ * We always register the regulators in the proper sequence,
+ * so the supply names are read correctly. The DT-defined
+ * names are saved at the end of this loop.
+ */
+ if (regulators == axp22x_regulators) {
+ if (i == AXP22X_DC1SW) {
+ new_desc = devm_kzalloc(&pdev->dev,
+ sizeof(*desc),
+ GFP_KERNEL);
+ *new_desc = regulators[i];
+ new_desc->supply_name = axp22x_dc1_name;
+ desc = new_desc;
+ } else if (i == AXP22X_DC5LDO) {
+ new_desc = devm_kzalloc(&pdev->dev,
+ sizeof(*desc),
+ GFP_KERNEL);
+ *new_desc = regulators[i];
+ new_desc->supply_name = axp22x_dc5_name;
+ desc = new_desc;
+ }
+ }
+
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register %s\n",
regulators[i].name);
@@ -388,6 +419,21 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to set workmode on %s\n",
rdev->desc->name);
}
+
+ /*
+ * Save AXP22X DCDC1 / DCDC5 regulator names for later.
+ */
+ if (regulators == axp22x_regulators) {
+ /* Can we use rdev->constraints->name instead? */
+ if (i == AXP22X_DCDC1)
+ of_property_read_string(rdev->dev.of_node,
+ "regulator-name",
+ &axp22x_dc1_name);
+ else if (i == AXP22X_DCDC5)
+ of_property_read_string(rdev->dev.of_node,
+ "regulator-name",
+ &axp22x_dc5_name);
+ }
}
return 0;
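Because DC1SW and DC5LDO are wired internally to DCDC1 and DCDC5, the axp20x loop above clones the const regulator_desc with devm_kzalloc() and patches only supply_name before registering, then records the DT "regulator-name" of DCDC1/DCDC5 once those are registered. A hedged userspace model of the copy-and-patch-one-field idiom (the names here are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { const char *name; const char *supply_name; };

static const struct desc dc1sw_template = { "dc1sw", NULL };

int main(void)
{
	const char *dcdc1_dt_name = "vdd-dcdc1";   /* as read from DT */

	/* Copy the read-only template, then override a single field. */
	struct desc *d = malloc(sizeof(*d));
	if (!d)
		return 1;
	memcpy(d, &dc1sw_template, sizeof(*d));
	d->supply_name = dcdc1_dt_name;

	printf("%s supplied by %s\n", d->name, d->supply_name);
	free(d);
	return 0;
}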
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 628430bdc312..76b01835dcb4 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -244,7 +244,7 @@ static int bcm590xx_get_enable_register(int id)
break;
case BCM590XX_REG_VBUS:
reg = BCM590XX_OTG_CTRL;
- };
+ }
return reg;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 8a34f6acc801..73b7683355cd 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -51,7 +51,6 @@
pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
static DEFINE_MUTEX(regulator_list_mutex);
-static LIST_HEAD(regulator_list);
static LIST_HEAD(regulator_map_list);
static LIST_HEAD(regulator_ena_gpio_list);
static LIST_HEAD(regulator_supply_alias_list);
@@ -59,6 +58,8 @@ static bool has_full_constraints;
static struct dentry *debugfs_root;
+static struct class regulator_class;
+
/*
* struct regulator_map
*
@@ -132,6 +133,45 @@ static bool have_full_constraints(void)
}
/**
+ * regulator_lock_supply - lock a regulator and its supplies
+ * @rdev: regulator source
+ */
+static void regulator_lock_supply(struct regulator_dev *rdev)
+{
+ struct regulator *supply;
+ int i = 0;
+
+ while (1) {
+ mutex_lock_nested(&rdev->mutex, i++);
+ supply = rdev->supply;
+
+ if (!rdev->supply)
+ return;
+
+ rdev = supply->rdev;
+ }
+}
+
+/**
+ * regulator_unlock_supply - unlock a regulator and its supplies
+ * @rdev: regulator source
+ */
+static void regulator_unlock_supply(struct regulator_dev *rdev)
+{
+ struct regulator *supply;
+
+ while (1) {
+ mutex_unlock(&rdev->mutex);
+ supply = rdev->supply;
+
+ if (!rdev->supply)
+ return;
+
+ rdev = supply->rdev;
+ }
+}
+
+/**
* of_get_regulator - get a regulator device node based on supply name
* @dev: Device pointer for the consumer (of regulator) device
* @supply: regulator supply name
@@ -180,7 +220,7 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
- rdev_err(rdev, "operation not allowed\n");
+ rdev_err(rdev, "voltage operation not allowed\n");
return -EPERM;
}
@@ -240,7 +280,7 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
- rdev_err(rdev, "operation not allowed\n");
+ rdev_err(rdev, "current operation not allowed\n");
return -EPERM;
}
@@ -277,7 +317,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
- rdev_err(rdev, "operation not allowed\n");
+ rdev_err(rdev, "mode operation not allowed\n");
return -EPERM;
}
@@ -301,7 +341,7 @@ static int regulator_check_drms(struct regulator_dev *rdev)
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- rdev_dbg(rdev, "operation not allowed\n");
+ rdev_dbg(rdev, "drms operation not allowed\n");
return -EPERM;
}
return 0;
@@ -1325,6 +1365,47 @@ static void regulator_supply_alias(struct device **dev, const char **supply)
}
}
+static int of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
+
+static int regulator_match(struct device *dev, const void *data)
+{
+ struct regulator_dev *r = dev_to_rdev(dev);
+
+ return strcmp(rdev_get_name(r), data) == 0;
+}
+
+static struct regulator_dev *regulator_lookup_by_name(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, name, regulator_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
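The two helpers above replace walks of the global regulator_list with class_find_device() plus a match callback, which also takes a reference on the matching embedded struct device. A small standalone model of the "iterate, test with a predicate, return the first match" shape; the callback signature is only an approximation of the driver-core one:

#include <stdio.h>
#include <string.h>

struct dev { const char *name; };

typedef int (*match_fn)(const struct dev *dev, const void *data);

/* Simplified stand-in for class_find_device(): first match wins. */
static const struct dev *find_device(const struct dev *devs, int n,
				     const void *data, match_fn match)
{
	for (int i = 0; i < n; i++)
		if (match(&devs[i], data))
			return &devs[i];
	return NULL;
}

static int name_match(const struct dev *dev, const void *data)
{
	return strcmp(dev->name, data) == 0;
}

int main(void)
{
	const struct dev devs[] = { { "vdd-core" }, { "vdd-io" } };
	const struct dev *d = find_device(devs, 2, "vdd-io", name_match);

	printf("found: %s\n", d ? d->name : "(none)");
	return 0;
}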
+
+/**
+ * regulator_dev_lookup - lookup a regulator device.
+ * @dev: device for regulator "consumer".
+ * @supply: Supply name or regulator ID.
+ * @ret: 0 on success, -ENODEV if lookup fails permanently, -EPROBE_DEFER if
+ * lookup could succeed in the future.
+ *
+ * On success, returns a struct regulator_dev that corresponds to the name
+ * @supply, with the embedded struct device's refcount incremented by one;
+ * returns NULL on failure. The refcount must be dropped by calling put_device().
+ */
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
const char *supply,
int *ret)
@@ -1340,10 +1421,9 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (dev && dev->of_node) {
node = of_get_regulator(dev, supply);
if (node) {
- list_for_each_entry(r, &regulator_list, list)
- if (r->dev.parent &&
- node == r->dev.of_node)
- return r;
+ r = of_find_regulator_by_node(node);
+ if (r)
+ return r;
*ret = -EPROBE_DEFER;
return NULL;
} else {
@@ -1361,20 +1441,24 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (dev)
devname = dev_name(dev);
- list_for_each_entry(r, &regulator_list, list)
- if (strcmp(rdev_get_name(r), supply) == 0)
- return r;
+ r = regulator_lookup_by_name(supply);
+ if (r)
+ return r;
+ mutex_lock(&regulator_list_mutex);
list_for_each_entry(map, &regulator_map_list, list) {
/* If the mapping has a device set up it must match */
if (map->dev_name &&
(!devname || strcmp(map->dev_name, devname)))
continue;
- if (strcmp(map->supply, supply) == 0)
+ if (strcmp(map->supply, supply) == 0 &&
+ get_device(&map->regulator->dev)) {
+ mutex_unlock(&regulator_list_mutex);
return map->regulator;
+ }
}
-
+ mutex_unlock(&regulator_list_mutex);
return NULL;
}
@@ -1409,6 +1493,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (have_full_constraints()) {
r = dummy_regulator_rdev;
+ get_device(&r->dev);
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
@@ -1418,12 +1503,16 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Recursively resolve the supply of the supply */
ret = regulator_resolve_supply(r);
- if (ret < 0)
+ if (ret < 0) {
+ put_device(&r->dev);
return ret;
+ }
ret = set_supply(rdev, r);
- if (ret < 0)
+ if (ret < 0) {
+ put_device(&r->dev);
return ret;
+ }
/* Cascade always-on state to supply */
if (_regulator_is_enabled(rdev) && rdev->supply) {
@@ -1459,8 +1548,6 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
else
ret = -EPROBE_DEFER;
- mutex_lock(&regulator_list_mutex);
-
rdev = regulator_dev_lookup(dev, id, &ret);
if (rdev)
goto found;
@@ -1472,7 +1559,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
* succeed, so, quit with appropriate error value
*/
if (ret && ret != -ENODEV)
- goto out;
+ return regulator;
if (!devname)
devname = "deviceless";
@@ -1486,40 +1573,46 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
devname, id);
rdev = dummy_regulator_rdev;
+ get_device(&rdev->dev);
goto found;
/* Don't log an error when called from regulator_get_optional() */
} else if (!have_full_constraints() || exclusive) {
dev_warn(dev, "dummy supplies not allowed\n");
}
- mutex_unlock(&regulator_list_mutex);
return regulator;
found:
if (rdev->exclusive) {
regulator = ERR_PTR(-EPERM);
- goto out;
+ put_device(&rdev->dev);
+ return regulator;
}
if (exclusive && rdev->open_count) {
regulator = ERR_PTR(-EBUSY);
- goto out;
+ put_device(&rdev->dev);
+ return regulator;
}
ret = regulator_resolve_supply(rdev);
if (ret < 0) {
regulator = ERR_PTR(ret);
- goto out;
+ put_device(&rdev->dev);
+ return regulator;
}
- if (!try_module_get(rdev->owner))
- goto out;
+ if (!try_module_get(rdev->owner)) {
+ put_device(&rdev->dev);
+ return regulator;
+ }
regulator = create_regulator(rdev, dev, id);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
+ put_device(&rdev->dev);
module_put(rdev->owner);
- goto out;
+ return regulator;
}
rdev->open_count++;
@@ -1533,9 +1626,6 @@ found:
rdev->use_count = 0;
}
-out:
- mutex_unlock(&regulator_list_mutex);
-
return regulator;
}
@@ -1633,6 +1723,7 @@ static void _regulator_put(struct regulator *regulator)
rdev->open_count--;
rdev->exclusive = 0;
+ put_device(&rdev->dev);
mutex_unlock(&rdev->mutex);
kfree(regulator->supply_name);
@@ -2312,6 +2403,40 @@ static int _regulator_is_enabled(struct regulator_dev *rdev)
return rdev->desc->ops->is_enabled(rdev);
}
+static int _regulator_list_voltage(struct regulator *regulator,
+ unsigned selector, int lock)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ const struct regulator_ops *ops = rdev->desc->ops;
+ int ret;
+
+ if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
+ return rdev->desc->fixed_uV;
+
+ if (ops->list_voltage) {
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+ if (lock)
+ mutex_lock(&rdev->mutex);
+ ret = ops->list_voltage(rdev, selector);
+ if (lock)
+ mutex_unlock(&rdev->mutex);
+ } else if (rdev->supply) {
+ ret = _regulator_list_voltage(rdev->supply, selector, lock);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret > 0) {
+ if (ret < rdev->constraints->min_uV)
+ ret = 0;
+ else if (ret > rdev->constraints->max_uV)
+ ret = 0;
+ }
+
+ return ret;
+}
+
/**
* regulator_is_enabled - is the regulator output enabled
* @regulator: regulator source
@@ -2401,33 +2526,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- struct regulator_dev *rdev = regulator->rdev;
- const struct regulator_ops *ops = rdev->desc->ops;
- int ret;
-
- if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
- return rdev->desc->fixed_uV;
-
- if (ops->list_voltage) {
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
- mutex_lock(&rdev->mutex);
- ret = ops->list_voltage(rdev, selector);
- mutex_unlock(&rdev->mutex);
- } else if (rdev->supply) {
- ret = regulator_list_voltage(rdev->supply, selector);
- } else {
- return -EINVAL;
- }
-
- if (ret > 0) {
- if (ret < rdev->constraints->min_uV)
- ret = 0;
- else if (ret > rdev->constraints->max_uV)
- ret = 0;
- }
-
- return ret;
+ return _regulator_list_voltage(regulator, selector, 1);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);
@@ -2562,6 +2661,23 @@ int regulator_is_supported_voltage(struct regulator *regulator,
}
EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
+static int regulator_map_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV)
+{
+ const struct regulator_desc *desc = rdev->desc;
+
+ if (desc->ops->map_voltage)
+ return desc->ops->map_voltage(rdev, min_uV, max_uV);
+
+ if (desc->ops->list_voltage == regulator_list_voltage_linear)
+ return regulator_map_voltage_linear(rdev, min_uV, max_uV);
+
+ if (desc->ops->list_voltage == regulator_list_voltage_linear_range)
+ return regulator_map_voltage_linear_range(rdev, min_uV, max_uV);
+
+ return regulator_map_voltage_iterate(rdev, min_uV, max_uV);
+}
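regulator_map_voltage() centralises the selector lookup: prefer the driver's .map_voltage, otherwise pick the linear or linear-range helper matching its .list_voltage, and fall back to iterating every selector. For the plain linear case the arithmetic is just selector = ceil((min_uV - base_uV) / step_uV), followed by a check that the resulting voltage still fits below max_uV. A hedged sketch of that linear case with made-up numbers:

#include <stdio.h>

/* Linear map: voltage(sel) = base_uV + sel * step_uV, sel in [0, n_sel) */
static int map_voltage_linear(int base_uV, int step_uV, int n_sel,
			      int min_uV, int max_uV)
{
	if (max_uV < base_uV || min_uV > base_uV + (n_sel - 1) * step_uV)
		return -1;                      /* outside the regulator's range */

	int sel = (min_uV - base_uV + step_uV - 1) / step_uV;   /* round up */
	if (sel < 0)
		sel = 0;
	if (base_uV + sel * step_uV > max_uV)
		return -1;                      /* no selector fits the window */
	return sel;
}

int main(void)
{
	/* 800 mV base, 25 mV steps, 32 selectors: ask for 1.10 V to 1.15 V */
	int sel = map_voltage_linear(800000, 25000, 32, 1100000, 1150000);

	printf("selector = %d (%d uV)\n", sel, 800000 + sel * 25000);
	return 0;
}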
+
static int _regulator_call_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV,
unsigned *selector)
@@ -2650,23 +2766,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
}
} else if (rdev->desc->ops->set_voltage_sel) {
- if (rdev->desc->ops->map_voltage) {
- ret = rdev->desc->ops->map_voltage(rdev, min_uV,
- max_uV);
- } else {
- if (rdev->desc->ops->list_voltage ==
- regulator_list_voltage_linear)
- ret = regulator_map_voltage_linear(rdev,
- min_uV, max_uV);
- else if (rdev->desc->ops->list_voltage ==
- regulator_list_voltage_linear_range)
- ret = regulator_map_voltage_linear_range(rdev,
- min_uV, max_uV);
- else
- ret = regulator_map_voltage_iterate(rdev,
- min_uV, max_uV);
- }
-
+ ret = regulator_map_voltage(rdev, min_uV, max_uV);
if (ret >= 0) {
best_val = rdev->desc->ops->list_voltage(rdev, ret);
if (min_uV <= best_val && max_uV >= best_val) {
@@ -2717,32 +2817,15 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
return ret;
}
-/**
- * regulator_set_voltage - set regulator output voltage
- * @regulator: regulator source
- * @min_uV: Minimum required voltage in uV
- * @max_uV: Maximum acceptable voltage in uV
- *
- * Sets a voltage regulator to the desired output voltage. This can be set
- * during any regulator state. IOW, regulator can be disabled or enabled.
- *
- * If the regulator is enabled then the voltage will change to the new value
- * immediately otherwise if the regulator is disabled the regulator will
- * output at the new voltage when enabled.
- *
- * NOTE: If the regulator is shared between several devices then the lowest
- * request voltage that meets the system constraints will be used.
- * Regulator system constraints must be set for this regulator before
- * calling this function otherwise this call will fail.
- */
-int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
+static int regulator_set_voltage_unlocked(struct regulator *regulator,
+ int min_uV, int max_uV)
{
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
-
- mutex_lock(&rdev->mutex);
+ int best_supply_uV = 0;
+ int supply_change_uV = 0;
/* If we're setting the same range as last time the change
* should be a noop (some cpufreq implementations use the same
@@ -2786,17 +2869,95 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
if (ret < 0)
goto out2;
+ if (rdev->supply && (rdev->desc->min_dropout_uV ||
+ !rdev->desc->ops->get_voltage)) {
+ int current_supply_uV;
+ int selector;
+
+ selector = regulator_map_voltage(rdev, min_uV, max_uV);
+ if (selector < 0) {
+ ret = selector;
+ goto out2;
+ }
+
+ best_supply_uV = _regulator_list_voltage(regulator, selector, 0);
+ if (best_supply_uV < 0) {
+ ret = best_supply_uV;
+ goto out2;
+ }
+
+ best_supply_uV += rdev->desc->min_dropout_uV;
+
+ current_supply_uV = _regulator_get_voltage(rdev->supply->rdev);
+ if (current_supply_uV < 0) {
+ ret = current_supply_uV;
+ goto out2;
+ }
+
+ supply_change_uV = best_supply_uV - current_supply_uV;
+ }
+
+ if (supply_change_uV > 0) {
+ ret = regulator_set_voltage_unlocked(rdev->supply,
+ best_supply_uV, INT_MAX);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
+ ret);
+ goto out2;
+ }
+ }
+
ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
if (ret < 0)
goto out2;
+ if (supply_change_uV < 0) {
+ ret = regulator_set_voltage_unlocked(rdev->supply,
+ best_supply_uV, INT_MAX);
+ if (ret)
+ dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
+ ret);
+ /* No need to fail here */
+ ret = 0;
+ }
+
out:
- mutex_unlock(&rdev->mutex);
return ret;
out2:
regulator->min_uV = old_min_uV;
regulator->max_uV = old_max_uV;
- mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
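The new supply handling computes the minimum supply voltage as the requested output plus the descriptor's min_dropout_uV, raises the parent before the child when the supply must go up, and only lowers it afterwards when it can come down (and treats a failed decrease as non-fatal), so the child never momentarily loses the headroom it needs. A small arithmetic sketch of that ordering decision; the values are illustrative only:

#include <stdio.h>

int main(void)
{
	int target_uV         = 1200000;   /* new child output */
	int min_dropout_uV    = 125000;    /* from the child's regulator_desc */
	int current_supply_uV = 1100000;   /* parent's present output */

	int best_supply_uV   = target_uV + min_dropout_uV;        /* 1325000 */
	int supply_change_uV = best_supply_uV - current_supply_uV; /* +225000 */

	if (supply_change_uV > 0)
		printf("raise supply to %d uV first, then set the child\n",
		       best_supply_uV);
	else if (supply_change_uV < 0)
		printf("set the child first, then lower supply to %d uV\n",
		       best_supply_uV);
	else
		printf("supply already at %d uV\n", best_supply_uV);
	return 0;
}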
+
+/**
+ * regulator_set_voltage - set regulator output voltage
+ * @regulator: regulator source
+ * @min_uV: Minimum required voltage in uV
+ * @max_uV: Maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * during any regulator state. IOW, regulator can be disabled or enabled.
+ *
+ * If the regulator is enabled then the voltage will change to the new value
+ * immediately otherwise if the regulator is disabled the regulator will
+ * output at the new voltage when enabled.
+ *
+ * NOTE: If the regulator is shared between several devices then the lowest
+ * request voltage that meets the system constraints will be used.
+ * Regulator system constraints must be set for this regulator before
+ * calling this function otherwise this call will fail.
+ */
+int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
+{
+ int ret = 0;
+
+ regulator_lock_supply(regulator->rdev);
+
+ ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV);
+
+ regulator_unlock_supply(regulator->rdev);
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
@@ -2949,7 +3110,7 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
} else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
ret = rdev->desc->fixed_uV;
} else if (rdev->supply) {
- ret = regulator_get_voltage(rdev->supply);
+ ret = _regulator_get_voltage(rdev->supply->rdev);
} else {
return -EINVAL;
}
@@ -2972,11 +3133,11 @@ int regulator_get_voltage(struct regulator *regulator)
{
int ret;
- mutex_lock(&regulator->rdev->mutex);
+ regulator_lock_supply(regulator->rdev);
ret = _regulator_get_voltage(regulator->rdev);
- mutex_unlock(&regulator->rdev->mutex);
+ regulator_unlock_supply(regulator->rdev);
return ret;
}
@@ -3810,8 +3971,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
}
- list_add(&rdev->list, &regulator_list);
-
rdev_init_debugfs(rdev);
out:
mutex_unlock(&regulator_list_mutex);
@@ -3865,6 +4024,19 @@ void regulator_unregister(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unregister);
+static int _regulator_suspend_prepare(struct device *dev, void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ const suspend_state_t *state = data;
+ int ret;
+
+ mutex_lock(&rdev->mutex);
+ ret = suspend_prepare(rdev, *state);
+ mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
+
/**
* regulator_suspend_prepare - prepare regulators for system wide suspend
* @state: system suspend state
@@ -3874,30 +4046,45 @@ EXPORT_SYMBOL_GPL(regulator_unregister);
*/
int regulator_suspend_prepare(suspend_state_t state)
{
- struct regulator_dev *rdev;
- int ret = 0;
-
/* ON is handled by regulator active state */
if (state == PM_SUSPEND_ON)
return -EINVAL;
- mutex_lock(&regulator_list_mutex);
- list_for_each_entry(rdev, &regulator_list, list) {
+ return class_for_each_device(&regulator_class, NULL, &state,
+ _regulator_suspend_prepare);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
- mutex_lock(&rdev->mutex);
- ret = suspend_prepare(rdev, state);
- mutex_unlock(&rdev->mutex);
+static int _regulator_suspend_finish(struct device *dev, void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ int ret;
- if (ret < 0) {
- rdev_err(rdev, "failed to prepare\n");
- goto out;
+ mutex_lock(&rdev->mutex);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+ if (!_regulator_is_enabled(rdev)) {
+ ret = _regulator_do_enable(rdev);
+ if (ret)
+ dev_err(dev,
+ "Failed to resume regulator %d\n",
+ ret);
}
+ } else {
+ if (!have_full_constraints())
+ goto unlock;
+ if (!_regulator_is_enabled(rdev))
+ goto unlock;
+
+ ret = _regulator_do_disable(rdev);
+ if (ret)
+ dev_err(dev, "Failed to suspend regulator %d\n", ret);
}
-out:
- mutex_unlock(&regulator_list_mutex);
- return ret;
+unlock:
+ mutex_unlock(&rdev->mutex);
+
+ /* Keep processing regulators in spite of any errors */
+ return 0;
}
-EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
/**
* regulator_suspend_finish - resume regulators from system wide suspend
@@ -3907,33 +4094,8 @@ EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
*/
int regulator_suspend_finish(void)
{
- struct regulator_dev *rdev;
- int ret = 0, error;
-
- mutex_lock(&regulator_list_mutex);
- list_for_each_entry(rdev, &regulator_list, list) {
- mutex_lock(&rdev->mutex);
- if (rdev->use_count > 0 || rdev->constraints->always_on) {
- if (!_regulator_is_enabled(rdev)) {
- error = _regulator_do_enable(rdev);
- if (error)
- ret = error;
- }
- } else {
- if (!have_full_constraints())
- goto unlock;
- if (!_regulator_is_enabled(rdev))
- goto unlock;
-
- error = _regulator_do_disable(rdev);
- if (error)
- ret = error;
- }
-unlock:
- mutex_unlock(&rdev->mutex);
- }
- mutex_unlock(&regulator_list_mutex);
- return ret;
+ return class_for_each_device(&regulator_class, NULL, NULL,
+ _regulator_suspend_finish);
}
EXPORT_SYMBOL_GPL(regulator_suspend_finish);
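Both suspend paths now iterate regulators with class_for_each_device() and a per-device callback instead of walking regulator_list under regulator_list_mutex; note that _regulator_suspend_finish() deliberately returns 0 so one failing regulator does not abort the walk. A minimal model of "apply a callback to every device, keep going on errors":

#include <stdio.h>

struct dev { const char *name; int should_fail; };

typedef int (*dev_cb)(struct dev *dev, void *data);

/* Rough stand-in for class_for_each_device(): stops early on non-zero. */
static int for_each_device(struct dev *devs, int n, void *data, dev_cb cb)
{
	for (int i = 0; i < n; i++) {
		int ret = cb(&devs[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

static int resume_one(struct dev *dev, void *data)
{
	(void)data;
	if (dev->should_fail)
		fprintf(stderr, "Failed to resume %s\n", dev->name);
	/* Keep processing the remaining devices in spite of errors. */
	return 0;
}

int main(void)
{
	struct dev devs[] = { { "ldo1", 0 }, { "buck2", 1 }, { "ldo3", 0 } };

	return for_each_device(devs, 3, NULL, resume_one);
}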
@@ -4053,14 +4215,35 @@ static const struct file_operations supply_map_fops = {
};
#ifdef CONFIG_DEBUG_FS
+struct summary_data {
+ struct seq_file *s;
+ struct regulator_dev *parent;
+ int level;
+};
+
+static void regulator_summary_show_subtree(struct seq_file *s,
+ struct regulator_dev *rdev,
+ int level);
+
+static int regulator_summary_show_children(struct device *dev, void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ struct summary_data *summary_data = data;
+
+ if (rdev->supply && rdev->supply->rdev == summary_data->parent)
+ regulator_summary_show_subtree(summary_data->s, rdev,
+ summary_data->level + 1);
+
+ return 0;
+}
+
static void regulator_summary_show_subtree(struct seq_file *s,
struct regulator_dev *rdev,
int level)
{
- struct list_head *list = s->private;
- struct regulator_dev *child;
struct regulation_constraints *c;
struct regulator *consumer;
+ struct summary_data summary_data;
if (!rdev)
return;
@@ -4110,33 +4293,32 @@ static void regulator_summary_show_subtree(struct seq_file *s,
seq_puts(s, "\n");
}
- list_for_each_entry(child, list, list) {
- /* handle only non-root regulators supplied by current rdev */
- if (!child->supply || child->supply->rdev != rdev)
- continue;
+ summary_data.s = s;
+ summary_data.level = level;
+ summary_data.parent = rdev;
- regulator_summary_show_subtree(s, child, level + 1);
- }
+ class_for_each_device(&regulator_class, NULL, &summary_data,
+ regulator_summary_show_children);
}
-static int regulator_summary_show(struct seq_file *s, void *data)
+static int regulator_summary_show_roots(struct device *dev, void *data)
{
- struct list_head *list = s->private;
- struct regulator_dev *rdev;
-
- seq_puts(s, " regulator use open bypass voltage current min max\n");
- seq_puts(s, "-------------------------------------------------------------------------------\n");
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ struct seq_file *s = data;
- mutex_lock(&regulator_list_mutex);
+ if (!rdev->supply)
+ regulator_summary_show_subtree(s, rdev, 0);
- list_for_each_entry(rdev, list, list) {
- if (rdev->supply)
- continue;
+ return 0;
+}
- regulator_summary_show_subtree(s, rdev, 0);
- }
+static int regulator_summary_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, " regulator use open bypass voltage current min max\n");
+ seq_puts(s, "-------------------------------------------------------------------------------\n");
- mutex_unlock(&regulator_list_mutex);
+ class_for_each_device(&regulator_class, NULL, s,
+ regulator_summary_show_roots);
return 0;
}
@@ -4170,7 +4352,7 @@ static int __init regulator_init(void)
&supply_map_fops);
debugfs_create_file("regulator_summary", 0444, debugfs_root,
- &regulator_list, &regulator_summary_fops);
+ NULL, &regulator_summary_fops);
regulator_dummy_init();
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index e628d4c2f2ae..12a25b40e473 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -381,6 +381,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
case DA9053_AA:
case DA9053_BA:
case DA9053_BB:
+ case DA9053_BC:
for (i = 0; i < ARRAY_SIZE(da9053_regulator_info); i++) {
info = &da9053_regulator_info[i];
if (info->reg_desc.id == id)
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index aed1ad3dc964..536e931eb921 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -698,7 +698,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
rdata->initdata = da9063_matches[i].init_data;
n++;
- };
+ }
*da9063_reg_matches = da9063_matches;
return pdata;
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 250700c853bf..499e437c7e91 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -76,6 +76,9 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(np, "regulator-allow-bypass"))
constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;
+ if (of_property_read_bool(np, "regulator-allow-set-load"))
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_DRMS;
+
ret = of_property_read_u32(np, "regulator-ramp-delay", &pval);
if (!ret) {
if (pval)
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index fc3166dfcbfa..3aca067b9901 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -69,12 +69,6 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
drvdata->state = selector;
- ret = pwm_enable(drvdata->pwm);
- if (ret) {
- dev_err(&rdev->dev, "Failed to enable PWM\n");
- return ret;
- }
-
return 0;
}
@@ -89,6 +83,29 @@ static int pwm_regulator_list_voltage(struct regulator_dev *rdev,
return drvdata->duty_cycle_table[selector].uV;
}
+static int pwm_regulator_enable(struct regulator_dev *dev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ return pwm_enable(drvdata->pwm);
+}
+
+static int pwm_regulator_disable(struct regulator_dev *dev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ pwm_disable(drvdata->pwm);
+
+ return 0;
+}
+
+static int pwm_regulator_is_enabled(struct regulator_dev *dev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ return pwm_is_enabled(drvdata->pwm);
+}
+
/**
* Continuous voltage call-backs
*/
@@ -144,11 +161,17 @@ static struct regulator_ops pwm_regulator_voltage_table_ops = {
.get_voltage_sel = pwm_regulator_get_voltage_sel,
.list_voltage = pwm_regulator_list_voltage,
.map_voltage = regulator_map_voltage_iterate,
+ .enable = pwm_regulator_enable,
+ .disable = pwm_regulator_disable,
+ .is_enabled = pwm_regulator_is_enabled,
};
static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
.get_voltage = pwm_regulator_get_voltage,
.set_voltage = pwm_regulator_set_voltage,
+ .enable = pwm_regulator_enable,
+ .disable = pwm_regulator_disable,
+ .is_enabled = pwm_regulator_is_enabled,
};
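The pwm-regulator change moves on/off control out of set_voltage_sel() into dedicated .enable/.disable/.is_enabled callbacks that simply delegate to the PWM, so the regulator core decides when the output actually runs. A trivial standalone sketch of an ops table delegating to a fake device (all names here are invented):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device state standing in for the PWM channel. */
struct pwm_dev { bool running; };

struct ops {
	int (*enable)(struct pwm_dev *d);
	int (*disable)(struct pwm_dev *d);
	int (*is_enabled)(struct pwm_dev *d);
};

static int pwm_on(struct pwm_dev *d)    { d->running = true;  return 0; }
static int pwm_off(struct pwm_dev *d)   { d->running = false; return 0; }
static int pwm_state(struct pwm_dev *d) { return d->running; }

/* On/off is delegated to dedicated callbacks, not hidden in set_voltage. */
static const struct ops pwm_reg_ops = {
	.enable     = pwm_on,
	.disable    = pwm_off,
	.is_enabled = pwm_state,
};

int main(void)
{
	struct pwm_dev pwm = { false };

	pwm_reg_ops.enable(&pwm);
	printf("enabled: %d\n", pwm_reg_ops.is_enabled(&pwm));
	pwm_reg_ops.disable(&pwm);
	printf("enabled: %d\n", pwm_reg_ops.is_enabled(&pwm));
	return 0;
}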
static struct regulator_desc pwm_regulator_desc = {
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 9c6167dd2c8b..6fa0c7d13290 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -36,9 +36,9 @@ struct qcom_rpm_reg {
};
struct rpm_regulator_req {
- u32 key;
- u32 nbytes;
- u32 value;
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
};
#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
@@ -62,9 +62,9 @@ static int rpm_reg_enable(struct regulator_dev *rdev)
struct rpm_regulator_req req;
int ret;
- req.key = RPM_KEY_SWEN;
- req.nbytes = sizeof(u32);
- req.value = 1;
+ req.key = cpu_to_le32(RPM_KEY_SWEN);
+ req.nbytes = cpu_to_le32(sizeof(u32));
+ req.value = cpu_to_le32(1);
ret = rpm_reg_write_active(vreg, &req, sizeof(req));
if (!ret)
@@ -86,8 +86,8 @@ static int rpm_reg_disable(struct regulator_dev *rdev)
struct rpm_regulator_req req;
int ret;
- req.key = RPM_KEY_SWEN;
- req.nbytes = sizeof(u32);
+ req.key = cpu_to_le32(RPM_KEY_SWEN);
+ req.nbytes = cpu_to_le32(sizeof(u32));
req.value = 0;
ret = rpm_reg_write_active(vreg, &req, sizeof(req));
@@ -113,9 +113,9 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev,
struct rpm_regulator_req req;
int ret = 0;
- req.key = RPM_KEY_UV;
- req.nbytes = sizeof(u32);
- req.value = min_uV;
+ req.key = cpu_to_le32(RPM_KEY_UV);
+ req.nbytes = cpu_to_le32(sizeof(u32));
+ req.value = cpu_to_le32(min_uV);
ret = rpm_reg_write_active(vreg, &req, sizeof(req));
if (!ret)
@@ -129,9 +129,9 @@ static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
struct rpm_regulator_req req;
- req.key = RPM_KEY_MA;
- req.nbytes = sizeof(u32);
- req.value = load_uA;
+ req.key = cpu_to_le32(RPM_KEY_MA);
+ req.nbytes = cpu_to_le32(sizeof(u32));
+ req.value = cpu_to_le32(load_uA / 1000);
return rpm_reg_write_active(vreg, &req, sizeof(req));
}
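The RPM message fields become __le32 and every store goes through cpu_to_le32(), so the wire format stays little-endian regardless of the host CPU; the set_load change also converts the microamp argument to the milliamps the firmware expects. A userspace sketch using <endian.h> as a stand-in for the kernel helpers (the key value is illustrative):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct rpm_req {                 /* all fields little-endian on the wire */
	uint32_t key;
	uint32_t nbytes;
	uint32_t value;
};

int main(void)
{
	int load_uA = 250000;       /* consumer asks for 250 mA, in uA */
	struct rpm_req req;

	req.key    = htole32(0x616d);            /* illustrative key */
	req.nbytes = htole32(sizeof(uint32_t));
	req.value  = htole32(load_uA / 1000);    /* firmware wants mA */

	printf("value on the wire: 0x%08x (host sees %u mA)\n",
	       (unsigned int)req.value, (unsigned int)le32toh(req.value));
	return 0;
}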
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 3510b3e7330a..ddc4f10e268a 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/core.h>
@@ -33,7 +33,7 @@ static int tps6105x_regulator_enable(struct regulator_dev *rdev)
int ret;
/* Activate voltage mode */
- ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ ret = regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_MODE_MASK,
TPS6105X_REG0_MODE_VOLTAGE << TPS6105X_REG0_MODE_SHIFT);
if (ret)
@@ -48,7 +48,7 @@ static int tps6105x_regulator_disable(struct regulator_dev *rdev)
int ret;
/* Set into shutdown mode */
- ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ ret = regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_MODE_MASK,
TPS6105X_REG0_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
if (ret)
@@ -60,10 +60,10 @@ static int tps6105x_regulator_disable(struct regulator_dev *rdev)
static int tps6105x_regulator_is_enabled(struct regulator_dev *rdev)
{
struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
- u8 regval;
+ unsigned int regval;
int ret;
- ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ ret = regmap_read(tps6105x->regmap, TPS6105X_REG_0, &regval);
if (ret)
return ret;
regval &= TPS6105X_REG0_MODE_MASK;
@@ -78,10 +78,10 @@ static int tps6105x_regulator_is_enabled(struct regulator_dev *rdev)
static int tps6105x_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
- u8 regval;
+ unsigned int regval;
int ret;
- ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ ret = regmap_read(tps6105x->regmap, TPS6105X_REG_0, &regval);
if (ret)
return ret;
@@ -96,7 +96,7 @@ static int tps6105x_regulator_set_voltage_sel(struct regulator_dev *rdev,
struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
int ret;
- ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ ret = regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_VOLTAGE_MASK,
selector << TPS6105X_REG0_VOLTAGE_SHIFT);
if (ret)
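The tps6105x driver drops its private tps6105x_get()/tps6105x_mask_and_set() accessors in favour of regmap_read() and regmap_update_bits(); regmap_update_bits() is a read-modify-write that only touches the bits covered by the mask. A plain-C model of that semantics, with no I2C involved, just the bit manipulation:

#include <stdio.h>

/* Model of regmap_update_bits(): new = (old & ~mask) | (val & mask) */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg0 = 0x59;          /* pretend current register value */
	unsigned int mode_mask = 0x03;     /* hypothetical MODE field, bits 1:0 */

	reg0 = update_bits(reg0, mode_mask, 0x02);   /* rewrite MODE field only */
	printf("REG0 = 0x%02x\n", reg0);
	return 0;
}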
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 5cc19b44974a..d2c3d7cc35f5 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -86,6 +86,42 @@
#define TPS65023_MAX_REG_ID TPS65023_LDO_2
+#define TPS65023_REGULATOR_DCDC(_num, _t, _em) \
+ { \
+ .name = "VDCDC"#_num, \
+ .of_match = of_match_ptr("VDCDC"#_num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = TPS65023_DCDC_##_num, \
+ .n_voltages = ARRAY_SIZE(_t), \
+ .ops = &tps65023_dcdc_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .volt_table = _t, \
+ .vsel_reg = TPS65023_REG_DEF_CORE, \
+ .vsel_mask = ARRAY_SIZE(_t) - 1, \
+ .enable_mask = _em, \
+ .enable_reg = TPS65023_REG_REG_CTRL, \
+ .apply_reg = TPS65023_REG_CON_CTRL2, \
+ .apply_bit = TPS65023_REG_CTRL2_GO, \
+ } \
+
+#define TPS65023_REGULATOR_LDO(_num, _t, _vm) \
+ { \
+ .name = "LDO"#_num, \
+ .of_match = of_match_ptr("LDO"#_num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = TPS65023_LDO_##_num, \
+ .n_voltages = ARRAY_SIZE(_t), \
+ .ops = &tps65023_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .volt_table = _t, \
+ .vsel_reg = TPS65023_REG_LDO_CTRL, \
+ .vsel_mask = _vm, \
+ .enable_mask = 1 << (_num), \
+ .enable_reg = TPS65023_REG_REG_CTRL, \
+ } \
+
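The TPS65023_REGULATOR_DCDC/LDO macros above turn the old per-probe field assignments into static const regulator_desc tables, one per chip variant; using .vsel_mask = ARRAY_SIZE(_t) - 1 relies on every voltage table having a power-of-two number of entries. A compact illustration of the same "macro expands to a designated initializer" idiom, with invented fields:

#include <stdio.h>

struct desc { const char *name; int id; unsigned int n_voltages; unsigned int vsel_mask; };

static const int ldo_table[] = { 1000, 1800, 2500, 3300 };   /* 4 entries */

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Expands to one designated initializer per regulator. */
#define REG_LDO(_num, _t)						\
	{								\
		.name = "LDO"#_num,					\
		.id = (_num),						\
		.n_voltages = ARRAY_SIZE(_t),				\
		.vsel_mask = ARRAY_SIZE(_t) - 1, /* needs 2^n table */	\
	}

static const struct desc regs[] = {
	REG_LDO(1, ldo_table),
	REG_LDO(2, ldo_table),
};

int main(void)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(regs); i++)
		printf("%s: %u voltages, mask 0x%x\n",
		       regs[i].name, regs[i].n_voltages, regs[i].vsel_mask);
	return 0;
}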
/* Supported voltage values for regulators */
static const unsigned int VCORE_VSEL_table[] = {
800000, 825000, 850000, 875000,
@@ -124,25 +160,16 @@ static const unsigned int TPS65023_LDO2_VSEL_table[] = {
2500000, 2800000, 3000000, 3300000,
};
-/* Regulator specific details */
-struct tps_info {
- const char *name;
- u8 table_len;
- const unsigned int *table;
-};
-
/* PMIC details */
struct tps_pmic {
- struct regulator_desc desc[TPS65023_NUM_REGULATOR];
struct regulator_dev *rdev[TPS65023_NUM_REGULATOR];
- const struct tps_info *info[TPS65023_NUM_REGULATOR];
+ const struct tps_driver_data *driver_data;
struct regmap *regmap;
- u8 core_regulator;
};
/* Struct passed as driver data */
struct tps_driver_data {
- const struct tps_info *info;
+ const struct regulator_desc *desc;
u8 core_regulator;
};
@@ -154,7 +181,7 @@ static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev)
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
- if (dcdc != tps->core_regulator)
+ if (dcdc != tps->driver_data->core_regulator)
return 0;
return regulator_get_voltage_sel_regmap(dev);
@@ -166,7 +193,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
struct tps_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
- if (dcdc != tps->core_regulator)
+ if (dcdc != tps->driver_data->core_regulator)
return -EINVAL;
return regulator_set_voltage_sel_regmap(dev, selector);
@@ -199,30 +226,60 @@ static const struct regmap_config tps65023_regmap_config = {
.val_bits = 8,
};
+static const struct regulator_desc tps65020_regulators[] = {
+ TPS65023_REGULATOR_DCDC(1, DCDC_FIXED_3300000_VSEL_table, 0x20),
+ TPS65023_REGULATOR_DCDC(2, DCDC_FIXED_1800000_VSEL_table, 0x10),
+ TPS65023_REGULATOR_DCDC(3, VCORE_VSEL_table, 0x08),
+ TPS65023_REGULATOR_LDO(1, TPS65020_LDO_VSEL_table, 0x07),
+ TPS65023_REGULATOR_LDO(2, TPS65020_LDO_VSEL_table, 0x70),
+};
+
+static const struct regulator_desc tps65021_regulators[] = {
+ TPS65023_REGULATOR_DCDC(1, DCDC_FIXED_3300000_VSEL_table, 0x20),
+ TPS65023_REGULATOR_DCDC(2, DCDC_FIXED_1800000_VSEL_table, 0x10),
+ TPS65023_REGULATOR_DCDC(3, VCORE_VSEL_table, 0x08),
+ TPS65023_REGULATOR_LDO(1, TPS65023_LDO1_VSEL_table, 0x07),
+ TPS65023_REGULATOR_LDO(2, TPS65023_LDO2_VSEL_table, 0x70),
+};
+
+static const struct regulator_desc tps65023_regulators[] = {
+ TPS65023_REGULATOR_DCDC(1, VCORE_VSEL_table, 0x20),
+ TPS65023_REGULATOR_DCDC(2, DCDC_FIXED_3300000_VSEL_table, 0x10),
+ TPS65023_REGULATOR_DCDC(3, DCDC_FIXED_1800000_VSEL_table, 0x08),
+ TPS65023_REGULATOR_LDO(1, TPS65023_LDO1_VSEL_table, 0x07),
+ TPS65023_REGULATOR_LDO(2, TPS65023_LDO2_VSEL_table, 0x70),
+};
+
+static struct tps_driver_data tps65020_drv_data = {
+ .desc = tps65020_regulators,
+ .core_regulator = TPS65023_DCDC_3,
+};
+
+static struct tps_driver_data tps65021_drv_data = {
+ .desc = tps65021_regulators,
+ .core_regulator = TPS65023_DCDC_3,
+};
+
+static struct tps_driver_data tps65023_drv_data = {
+ .desc = tps65023_regulators,
+ .core_regulator = TPS65023_DCDC_1,
+};
+
static int tps_65023_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct tps_driver_data *drv_data = (void *)id->driver_data;
- const struct tps_info *info = drv_data->info;
+ struct regulator_init_data *init_data = dev_get_platdata(&client->dev);
struct regulator_config config = { };
- struct regulator_init_data *init_data;
- struct regulator_dev *rdev;
struct tps_pmic *tps;
int i;
int error;
- /**
- * init_data points to array of regulator_init structures
- * coming from the board-evm file.
- */
- init_data = dev_get_platdata(&client->dev);
- if (!init_data)
- return -EIO;
-
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
+ tps->driver_data = (struct tps_driver_data *)id->driver_data;
+
tps->regmap = devm_regmap_init_i2c(client, &tps65023_regmap_config);
if (IS_ERR(tps->regmap)) {
error = PTR_ERR(tps->regmap);
@@ -232,58 +289,22 @@ static int tps_65023_probe(struct i2c_client *client,
}
/* common for all regulators */
- tps->core_regulator = drv_data->core_regulator;
-
- for (i = 0; i < TPS65023_NUM_REGULATOR; i++, info++, init_data++) {
- /* Store regulator specific information */
- tps->info[i] = info;
-
- tps->desc[i].name = info->name;
- tps->desc[i].id = i;
- tps->desc[i].n_voltages = info->table_len;
- tps->desc[i].volt_table = info->table;
- tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
- &tps65023_ldo_ops : &tps65023_dcdc_ops);
- tps->desc[i].type = REGULATOR_VOLTAGE;
- tps->desc[i].owner = THIS_MODULE;
-
- tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL;
- switch (i) {
- case TPS65023_LDO_1:
- tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
- tps->desc[i].vsel_mask = 0x07;
- tps->desc[i].enable_mask = 1 << 1;
- break;
- case TPS65023_LDO_2:
- tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
- tps->desc[i].vsel_mask = 0x70;
- tps->desc[i].enable_mask = 1 << 2;
- break;
- default: /* DCDCx */
- tps->desc[i].enable_mask =
- 1 << (TPS65023_NUM_REGULATOR - i);
- tps->desc[i].vsel_reg = TPS65023_REG_DEF_CORE;
- tps->desc[i].vsel_mask = info->table_len - 1;
- tps->desc[i].apply_reg = TPS65023_REG_CON_CTRL2;
- tps->desc[i].apply_bit = TPS65023_REG_CTRL2_GO;
- }
+ config.dev = &client->dev;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
- config.dev = &client->dev;
- config.init_data = init_data;
- config.driver_data = tps;
- config.regmap = tps->regmap;
+ for (i = 0; i < TPS65023_NUM_REGULATOR; i++) {
+ if (init_data)
+ config.init_data = &init_data[i];
/* Register the regulators */
- rdev = devm_regulator_register(&client->dev, &tps->desc[i],
- &config);
- if (IS_ERR(rdev)) {
+ tps->rdev[i] = devm_regulator_register(&client->dev,
+ &tps->driver_data->desc[i], &config);
+ if (IS_ERR(tps->rdev[i])) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
- return PTR_ERR(rdev);
+ return PTR_ERR(tps->rdev[i]);
}
-
- /* Save regulator for cleanup */
- tps->rdev[i] = rdev;
}
i2c_set_clientdata(client, tps);
@@ -296,120 +317,33 @@ static int tps_65023_probe(struct i2c_client *client,
return 0;
}
-static const struct tps_info tps65020_regs[] = {
- {
- .name = "VDCDC1",
- .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
- .table = DCDC_FIXED_3300000_VSEL_table,
- },
- {
- .name = "VDCDC2",
- .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
- .table = DCDC_FIXED_1800000_VSEL_table,
- },
- {
- .name = "VDCDC3",
- .table_len = ARRAY_SIZE(VCORE_VSEL_table),
- .table = VCORE_VSEL_table,
- },
- {
- .name = "LDO1",
- .table_len = ARRAY_SIZE(TPS65020_LDO_VSEL_table),
- .table = TPS65020_LDO_VSEL_table,
- },
- {
- .name = "LDO2",
- .table_len = ARRAY_SIZE(TPS65020_LDO_VSEL_table),
- .table = TPS65020_LDO_VSEL_table,
- },
-};
-
-static const struct tps_info tps65021_regs[] = {
- {
- .name = "VDCDC1",
- .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
- .table = DCDC_FIXED_3300000_VSEL_table,
- },
- {
- .name = "VDCDC2",
- .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
- .table = DCDC_FIXED_1800000_VSEL_table,
- },
- {
- .name = "VDCDC3",
- .table_len = ARRAY_SIZE(VCORE_VSEL_table),
- .table = VCORE_VSEL_table,
- },
- {
- .name = "LDO1",
- .table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
- .table = TPS65023_LDO1_VSEL_table,
- },
- {
- .name = "LDO2",
- .table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
- .table = TPS65023_LDO2_VSEL_table,
- },
+static const struct of_device_id tps65023_of_match[] = {
+ { .compatible = "ti,tps65020", .data = &tps65020_drv_data},
+ { .compatible = "ti,tps65021", .data = &tps65021_drv_data},
+ { .compatible = "ti,tps65023", .data = &tps65023_drv_data},
+ {},
};
+MODULE_DEVICE_TABLE(of, tps65023_of_match);
-static const struct tps_info tps65023_regs[] = {
- {
- .name = "VDCDC1",
- .table_len = ARRAY_SIZE(VCORE_VSEL_table),
- .table = VCORE_VSEL_table,
- },
- {
- .name = "VDCDC2",
- .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
- .table = DCDC_FIXED_3300000_VSEL_table,
- },
- {
- .name = "VDCDC3",
- .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
- .table = DCDC_FIXED_1800000_VSEL_table,
- },
- {
- .name = "LDO1",
- .table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
- .table = TPS65023_LDO1_VSEL_table,
- },
+static const struct i2c_device_id tps_65023_id[] = {
{
- .name = "LDO2",
- .table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
- .table = TPS65023_LDO2_VSEL_table,
+ .name = "tps65023",
+ .driver_data = (kernel_ulong_t)&tps65023_drv_data
+ }, {
+ .name = "tps65021",
+ .driver_data = (kernel_ulong_t)&tps65021_drv_data
+ }, {
+ .name = "tps65020",
+ .driver_data = (kernel_ulong_t)&tps65020_drv_data
},
-};
-
-static struct tps_driver_data tps65020_drv_data = {
- .info = tps65020_regs,
- .core_regulator = TPS65023_DCDC_3,
-};
-
-static struct tps_driver_data tps65021_drv_data = {
- .info = tps65021_regs,
- .core_regulator = TPS65023_DCDC_3,
-};
-
-static struct tps_driver_data tps65023_drv_data = {
- .info = tps65023_regs,
- .core_regulator = TPS65023_DCDC_1,
-};
-
-static const struct i2c_device_id tps_65023_id[] = {
- {.name = "tps65023",
- .driver_data = (unsigned long) &tps65023_drv_data},
- {.name = "tps65021",
- .driver_data = (unsigned long) &tps65021_drv_data,},
- {.name = "tps65020",
- .driver_data = (unsigned long) &tps65020_drv_data},
{ },
};
-
MODULE_DEVICE_TABLE(i2c, tps_65023_id);
static struct i2c_driver tps_65023_i2c_driver = {
.driver = {
.name = "tps65023",
+ .of_match_table = of_match_ptr(tps65023_of_match),
},
.probe = tps_65023_probe,
.id_table = tps_65023_id,
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 5b494db9f95c..9d6ea3a4dccd 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -629,7 +629,6 @@ static struct spi_driver pmic_driver = {
.probe = pmic_probe,
.driver = {
.name = "tps6524x",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index baa5d047f9c8..85706a9f82c9 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -772,7 +772,6 @@ static int ds1305_remove(struct spi_device *spi)
static struct spi_driver ds1305_driver = {
.driver.name = "rtc-ds1305",
- .driver.owner = THIS_MODULE,
.probe = ds1305_probe,
.remove = ds1305_remove,
/* REVISIT add suspend/resume */
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index 79a06dd3c185..07371a9e3793 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -731,7 +731,6 @@ static SIMPLE_DEV_PM_OPS(ds1343_pm, ds1343_suspend, ds1343_resume);
static struct spi_driver ds1343_driver = {
.driver = {
.name = "ds1343",
- .owner = THIS_MODULE,
.pm = &ds1343_pm,
},
.probe = ds1343_probe,
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index c82b4c050326..641e8e8a0dd7 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -154,7 +154,6 @@ static int ds1347_probe(struct spi_device *spi)
static struct spi_driver ds1347_driver = {
.driver = {
.name = "ds1347",
- .owner = THIS_MODULE,
},
.probe = ds1347_probe,
};
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index e67bfcb3a1aa..4c229c97ef97 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -156,7 +156,6 @@ static int ds1390_probe(struct spi_device *spi)
static struct spi_driver ds1390_driver = {
.driver = {
.name = "rtc-ds1390",
- .owner = THIS_MODULE,
},
.probe = ds1390_probe,
};
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
index 4c9ba5368464..570ab28fc354 100644
--- a/drivers/rtc/rtc-ds3234.c
+++ b/drivers/rtc/rtc-ds3234.c
@@ -159,7 +159,6 @@ static int ds3234_probe(struct spi_device *spi)
static struct spi_driver ds3234_driver = {
.driver = {
.name = "ds3234",
- .owner = THIS_MODULE,
},
.probe = ds3234_probe,
};
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 4698c7e344e4..5ac45fc1a787 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -197,7 +197,6 @@ static int m41t93_probe(struct spi_device *spi)
static struct spi_driver m41t93_driver = {
.driver = {
.name = "rtc-m41t93",
- .owner = THIS_MODULE,
},
.probe = m41t93_probe,
};
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index 8d800b1bf87b..1f0eb79e69f9 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -137,7 +137,6 @@ static int m41t94_probe(struct spi_device *spi)
static struct spi_driver m41t94_driver = {
.driver = {
.name = "rtc-m41t94",
- .owner = THIS_MODULE,
},
.probe = m41t94_probe,
};
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index ac3f4191864f..315d09e0f2c1 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -146,7 +146,6 @@ static int max6902_probe(struct spi_device *spi)
static struct spi_driver max6902_driver = {
.driver = {
.name = "rtc-max6902",
- .owner = THIS_MODULE,
},
.probe = max6902_probe,
};
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 34295bf00416..1c91ce8a6d75 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -186,7 +186,6 @@ static int mcp795_probe(struct spi_device *spi)
static struct spi_driver mcp795_driver = {
.driver = {
.name = "rtc-mcp795",
- .owner = THIS_MODULE,
},
.probe = mcp795_probe,
};
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 1c47650fe624..ea8a31c91641 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -346,7 +346,6 @@ MODULE_DEVICE_TABLE(of, pcf2123_dt_ids);
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pcf2123_dt_ids),
},
.probe = pcf2123_probe,
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index feeedbd82000..83d2bcca6a8f 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -172,7 +172,6 @@ static int r9701_remove(struct spi_device *spi)
static struct spi_driver r9701_driver = {
.driver = {
.name = "rtc-r9701",
- .owner = THIS_MODULE,
},
.probe = r9701_probe,
.remove = r9701_remove,
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 090a101c1c81..1162fecab8cf 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -221,7 +221,6 @@ static int rs5c348_probe(struct spi_device *spi)
static struct spi_driver rs5c348_driver = {
.driver = {
.name = "rtc-rs5c348",
- .owner = THIS_MODULE,
},
.probe = rs5c348_probe,
};
diff --git a/drivers/rtc/rtc-rx4581.c b/drivers/rtc/rtc-rx4581.c
index 6889222f9ed6..de3fe4f8d133 100644
--- a/drivers/rtc/rtc-rx4581.c
+++ b/drivers/rtc/rtc-rx4581.c
@@ -291,7 +291,6 @@ MODULE_DEVICE_TABLE(spi, rx4581_id);
static struct spi_driver rx4581_driver = {
.driver = {
.name = "rtc-rx4581",
- .owner = THIS_MODULE,
},
.probe = rx4581_probe,
.id_table = rx4581_id,
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
index ceaca32e788d..4e7cad272469 100644
--- a/drivers/scsi/be2iscsi/Kconfig
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -1,9 +1,9 @@
config BE2ISCSI
- tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
+ tristate "Emulex 10Gbps iSCSI - BladeEngine 2"
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
select ISCSI_BOOT_SYSFS
help
- This driver implements the iSCSI functionality for ServerEngines'
+ This driver implements the iSCSI functionality for Emulex
10Gbps Storage adapter - BladeEngine 2.
diff --git a/drivers/scsi/be2iscsi/Makefile b/drivers/scsi/be2iscsi/Makefile
index c11f443e3f83..d0488eaafc25 100644
--- a/drivers/scsi/be2iscsi/Makefile
+++ b/drivers/scsi/be2iscsi/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile to build the iSCSI driver for ServerEngine's BladeEngine.
+# Makefile to build the iSCSI driver for Emulex OneConnect.
#
#
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 32070099c333..77f992e74726 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -10,7 +10,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 185391a64d4b..2778089b01a5 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -10,7 +10,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index cdfbc5c19cf4..4bfca355fbe4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -10,7 +10,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 2f0700796842..b7087ba69d8d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 7a6dbfbccec9..2e6abe7b7324 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
@@ -50,7 +50,7 @@ static unsigned int enable_msix = 1;
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
-MODULE_AUTHOR("Avago Technologies");
+MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
@@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
static struct scsi_host_template beiscsi_sht = {
.module = THIS_MODULE,
- .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
+ .name = "Emulex 10Gbe open-iscsi Initiator Driver",
.proc_name = DRV_NAME,
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b8c0c7819cb1..51366de5ef70 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
@@ -37,7 +37,7 @@
#define DRV_NAME "be2iscsi"
#define BUILD_STR "10.6.0.0"
-#define BE_NAME "Avago Technologies OneConnect" \
+#define BE_NAME "Emulex OneConnect " \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ca4016f20e76..1b2bd044dad6 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index b58a7decbd94..afa326da75c6 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
* Contact Information:
* linux-drivers@avagotech.com
*
- * Avago Technologies
+ * Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index d5cdc4776707..b0bc5ffee903 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -856,7 +856,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
return;
default:
- printk(KERN_ERR PFX "Unknown netevent %ld", event);
return;
}
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 1c56037146e1..c11cd193f896 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -16,10 +16,12 @@
#define _CXLFLASH_COMMON_H
#include <linux/list.h>
+#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
+extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -78,7 +80,7 @@ enum cxlflash_init_state {
enum cxlflash_state {
STATE_NORMAL, /* Normal running state, everything good */
- STATE_LIMBO, /* Limbo running state, trying to reset/recover */
+ STATE_RESET, /* Reset state, trying to reset/recover */
STATE_FAILTERM /* Failed/terminating state, error out users/threads */
};
@@ -101,29 +103,28 @@ struct cxlflash_cfg {
enum cxlflash_init_state init_state;
enum cxlflash_lr_state lr_state;
int lr_port;
+ atomic_t scan_host_needed;
struct cxl_afu *cxl_afu;
-
- struct pci_pool *cxlflash_cmd_pool;
struct pci_dev *parent_dev;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
struct mutex ctx_tbl_list_mutex;
+ struct rw_semaphore ioctl_rwsem;
struct ctx_info *ctx_tbl[MAX_CONTEXT];
struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
struct file_operations cxl_fops;
- atomic_t num_user_contexts;
-
/* Parameters that are LUN table related */
int last_lun_index[CXLFLASH_NUM_FC_PORTS];
int promote_lun_index;
struct list_head lluns; /* list of llun_info structs */
wait_queue_head_t tmf_waitq;
+ spinlock_t tmf_slock;
bool tmf_active;
- wait_queue_head_t limbo_waitq;
+ wait_queue_head_t reset_waitq;
enum cxlflash_state state;
};
@@ -160,9 +161,9 @@ struct afu {
/* AFU HW */
struct cxl_ioctl_start_work work;
- struct cxlflash_afu_map *afu_map; /* entire MMIO map */
- struct sisl_host_map *host_map; /* MC host map */
- struct sisl_ctrl_map *ctrl_map; /* MC control map */
+ struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
+ struct sisl_host_map __iomem *host_map; /* MC host map */
+ struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
u64 *hrrq_start;
@@ -175,7 +176,7 @@ struct afu {
u32 cmd_couts; /* Number of command checkouts */
u32 internal_lun; /* User-desired LUN mode for this AFU */
- char version[8];
+ char version[16];
u64 interface_version;
struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
@@ -184,17 +185,12 @@ struct afu {
static inline u64 lun_to_lunid(u64 lun)
{
- u64 lun_id;
+ __be64 lun_id;
int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
- return swab64(lun_id);
+ return be64_to_cpu(lun_id);
}
-int cxlflash_send_cmd(struct afu *, struct afu_cmd *);
-void cxlflash_wait_resp(struct afu *, struct afu_cmd *);
-int cxlflash_afu_reset(struct cxlflash_cfg *);
-struct afu_cmd *cxlflash_cmd_checkout(struct afu *);
-void cxlflash_cmd_checkin(struct afu_cmd *);
int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
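The common.h hunk above trades an unconditional swab64() in lun_to_lunid() for a __be64 plus be64_to_cpu(), which stays correct on big-endian hosts such as the POWER machines this adapter targets. A minimal userspace sketch of the same conversion, assuming glibc's <endian.h> and a hand-rolled stand-in for int_to_scsilun() (the demo_* names are hypothetical, not the driver's code):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for int_to_scsilun(): store a simple single-level LUN in
 * SAM "peripheral device addressing" byte order (big-endian bytes). */
static void demo_int_to_scsilun(uint64_t lun, uint8_t out[8])
{
        memset(out, 0, 8);
        out[0] = (lun >> 8) & 0x3f;
        out[1] = lun & 0xff;
}

int main(void)
{
        uint8_t raw[8];
        uint64_t lun_id;

        demo_int_to_scsilun(5, raw);
        memcpy(&lun_id, raw, sizeof(lun_id));

        /* be64toh() is right on any host endianness; a blind byte swap
         * (the old swab64()) is only right on little-endian CPUs. */
        printf("lunid = 0x%016llx\n", (unsigned long long)be64toh(lun_id));
        return 0;
}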
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index d98ad0ff64c1..a0923cade6f3 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -41,7 +41,6 @@ static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
}
lli->sdev = sdev;
- lli->newly_created = true;
lli->host_no = sdev->host->host_no;
lli->in_table = false;
@@ -74,24 +73,19 @@ out:
}
/**
- * refresh_local() - find and update local LUN information structure by WWID
+ * lookup_local() - find a local LUN information structure by WWID
* @cfg: Internal structure associated with the host.
* @wwid: WWID associated with LUN.
*
- * When the LUN is found, mark it by updating it's newly_created field.
- *
* Return: Found local lun_info structure on success, NULL on failure
- * If a LUN with the WWID is found in the list, refresh it's state.
*/
-static struct llun_info *refresh_local(struct cxlflash_cfg *cfg, u8 *wwid)
+static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid)
{
struct llun_info *lli, *temp;
list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
- if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) {
- lli->newly_created = false;
+ if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
return lli;
- }
return NULL;
}
@@ -120,7 +114,8 @@ static struct glun_info *lookup_global(u8 *wwid)
*
* The LUN is kept both in a local list (per adapter) and in a global list
* (across all adapters). Certain attributes of the LUN are local to the
- * adapter (such as index, port selection mask etc.).
+ * adapter (such as index, port selection mask, etc.).
+ *
* The block allocation map is shared across all adapters (i.e. associated
* with the global list). Since different attributes are associated with
* the per adapter and global entries, allocate two separate structures for each
@@ -128,6 +123,8 @@ static struct glun_info *lookup_global(u8 *wwid)
*
* Keep a pointer back from the local to the global entry.
*
+ * This routine assumes the caller holds the global mutex.
+ *
* Return: Found/Allocated local lun_info structure on success, NULL on failure
*/
static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
@@ -137,11 +134,10 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
struct Scsi_Host *shost = sdev->host;
struct cxlflash_cfg *cfg = shost_priv(shost);
- mutex_lock(&global.mutex);
if (unlikely(!wwid))
goto out;
- lli = refresh_local(cfg, wwid);
+ lli = lookup_local(cfg, wwid);
if (lli)
goto out;
@@ -169,7 +165,6 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
list_add(&gli->list, &global.gluns);
out:
- mutex_unlock(&global.mutex);
pr_debug("%s: returning %p\n", __func__, lli);
return lli;
}
@@ -235,10 +230,11 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
u64 flags = manage->hdr.flags;
u32 chan = sdev->channel;
+ mutex_lock(&global.mutex);
lli = find_and_create_lun(sdev, manage->wwid);
pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
- __func__, get_unaligned_le64(&manage->wwid[0]),
- get_unaligned_le64(&manage->wwid[8]),
+ __func__, get_unaligned_be64(&manage->wwid[0]),
+ get_unaligned_be64(&manage->wwid[8]),
manage->hdr.flags, lli);
if (unlikely(!lli)) {
rc = -ENOMEM;
@@ -246,21 +242,28 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
}
if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
- if (lli->newly_created)
- lli->port_sel = CHAN2PORT(chan);
- else
- lli->port_sel = BOTH_PORTS;
- /* Store off lun in unpacked, AFU-friendly format */
+ /*
+ * Update port selection mask based upon channel, store off LUN
+ * in unpacked, AFU-friendly format, and hang LUN reference in
+ * the sdev.
+ */
+ lli->port_sel |= CHAN2PORT(chan);
lli->lun_id[chan] = lun_to_lunid(sdev->lun);
sdev->hostdata = lli;
} else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
if (lli->parent->mode != MODE_NONE)
rc = -EBUSY;
- else
+ else {
sdev->hostdata = NULL;
+ lli->port_sel &= ~CHAN2PORT(chan);
+ }
}
+ pr_debug("%s: port_sel = %08X chan = %u lun_id = %016llX\n", __func__,
+ lli->port_sel, chan, lli->lun_id[chan]);
+
out:
+ mutex_unlock(&global.mutex);
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
}
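The lunmgt.c changes above move from overwriting port_sel wholesale (one port when newly created, BOTH_PORTS otherwise) to treating it as a per-channel bitmask that superpipe enable ORs in and superpipe disable clears. A tiny standalone model of that bookkeeping, assuming DEMO_CHAN2PORT() maps channel N to bit N (the driver's actual CHAN2PORT()/BOTH_PORTS encoding may differ):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for CHAN2PORT(): channel N selects port bit N. */
#define DEMO_CHAN2PORT(chan)    (1u << (chan))

static uint32_t port_sel;

static void enable_superpipe(uint32_t chan)
{
        port_sel |= DEMO_CHAN2PORT(chan);       /* add this channel's port */
}

static void disable_superpipe(uint32_t chan)
{
        port_sel &= ~DEMO_CHAN2PORT(chan);      /* drop only this channel */
}

int main(void)
{
        enable_superpipe(0);            /* port_sel = 0x1               */
        enable_superpipe(1);            /* port_sel = 0x3, both ports   */
        disable_superpipe(0);           /* port_sel = 0x2, port 1 only  */
        printf("port_sel = 0x%X\n", port_sel);
        return 0;
}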
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 3e3ccf16e7c2..1e5bf0ca81da 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -34,9 +34,8 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
-
/**
- * cxlflash_cmd_checkout() - checks out an AFU command
+ * cmd_checkout() - checks out an AFU command
* @afu: AFU to checkout from.
*
* Commands are checked out in a round-robin fashion. Note that since
@@ -47,7 +46,7 @@ MODULE_LICENSE("GPL");
*
* Return: The checked out command or NULL when command pool is empty.
*/
-struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
+static struct afu_cmd *cmd_checkout(struct afu *afu)
{
int k, dec = CXLFLASH_NUM_CMDS;
struct afu_cmd *cmd;
@@ -58,8 +57,8 @@ struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
cmd = &afu->cmd[k];
if (!atomic_dec_if_positive(&cmd->free)) {
- pr_debug("%s: returning found index=%d\n",
- __func__, cmd->slot);
+ pr_devel("%s: returning found index=%d cmd=%p\n",
+ __func__, cmd->slot, cmd);
memset(cmd->buf, 0, CMD_BUFSIZE);
memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
return cmd;
@@ -70,7 +69,7 @@ struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
}
/**
- * cxlflash_cmd_checkin() - checks in an AFU command
+ * cmd_checkin() - checks in an AFU command
* @cmd: AFU command to checkin.
*
* Safe to pass commands that have already been checked in. Several
@@ -79,7 +78,7 @@ struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
* to avoid clobbering values in the event that the command is checked
* out right away.
*/
-void cxlflash_cmd_checkin(struct afu_cmd *cmd)
+static void cmd_checkin(struct afu_cmd *cmd)
{
cmd->rcb.scp = NULL;
cmd->rcb.timeout = 0;
@@ -93,7 +92,7 @@ void cxlflash_cmd_checkin(struct afu_cmd *cmd)
return;
}
- pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
+ pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
/**
@@ -107,6 +106,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
struct sisl_ioarcb *ioarcb;
struct sisl_ioasa *ioasa;
+ u32 resid;
if (unlikely(!cmd))
return;
@@ -115,9 +115,10 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
ioasa = &(cmd->sa);
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
- pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
- __func__, cmd, scp);
- scp->result = (DID_ERROR << 16);
+ resid = ioasa->resid;
+ scsi_set_resid(scp, resid);
+ pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
+ __func__, cmd, scp, resid);
}
if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
@@ -127,7 +128,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
}
pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
- "afu_extra=0x%X, scsi_entra=0x%X, fc_extra=0x%X\n",
+ "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
__func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
ioasa->fc_extra);
@@ -158,8 +159,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
/* If the SISL_RC_FLAGS_OVERRUN flag was set,
* then we will handle this error elsewhere.
* If not then we must handle it here.
- * This is probably an AFU bug. We will
- * attempt a retry to see if that resolves it.
+ * This is probably an AFU bug.
*/
scp->result = (DID_ERROR << 16);
}
@@ -183,7 +183,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
/* We have an AFU error */
switch (ioasa->rc.afu_rc) {
case SISL_AFU_RC_NO_CHANNELS:
- scp->result = (DID_MEDIUM_ERROR << 16);
+ scp->result = (DID_NO_CONNECT << 16);
break;
case SISL_AFU_RC_DATA_DMA_ERR:
switch (ioasa->afu_extra) {
@@ -217,7 +217,6 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
static void cmd_complete(struct afu_cmd *cmd)
{
struct scsi_cmnd *scp;
- u32 resid;
ulong lock_flags;
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
@@ -229,45 +228,179 @@ static void cmd_complete(struct afu_cmd *cmd)
if (cmd->rcb.scp) {
scp = cmd->rcb.scp;
- if (unlikely(cmd->sa.rc.afu_rc ||
- cmd->sa.rc.scsi_rc ||
- cmd->sa.rc.fc_rc))
+ if (unlikely(cmd->sa.ioasc))
process_cmd_err(cmd, scp);
else
scp->result = (DID_OK << 16);
- resid = cmd->sa.resid;
cmd_is_tmf = cmd->cmd_tmf;
- cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */
+ cmd_checkin(cmd); /* Don't use cmd after here */
- pr_debug("%s: calling scsi_set_resid, scp=%p "
- "result=%X resid=%d\n", __func__,
- scp, scp->result, resid);
+ pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
+ "ioasc=%d\n", __func__, scp, scp->result,
+ cmd->sa.ioasc);
- scsi_set_resid(scp, resid);
scsi_dma_unmap(scp);
scp->scsi_done(scp);
if (cmd_is_tmf) {
- spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
- lock_flags);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
}
} else
complete(&cmd->cevent);
}
/**
+ * context_reset() - timeout handler for AFU commands
+ * @cmd: AFU command that timed out.
+ *
+ * Sends a reset to the AFU.
+ */
+static void context_reset(struct afu_cmd *cmd)
+{
+ int nretry = 0;
+ u64 rrin = 0x1;
+ u64 room = 0;
+ struct afu *afu = cmd->parent;
+ ulong lock_flags;
+
+ pr_debug("%s: cmd=%p\n", __func__, cmd);
+
+ spin_lock_irqsave(&cmd->slock, lock_flags);
+
+ /* Already completed? */
+ if (cmd->sa.host_use_b[0] & B_DONE) {
+ spin_unlock_irqrestore(&cmd->slock, lock_flags);
+ return;
+ }
+
+ cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
+ spin_unlock_irqrestore(&cmd->slock, lock_flags);
+
+ /*
+ * We really want to send this reset at all costs, so spread
+ * out wait time on successive retries for available room.
+ */
+ do {
+ room = readq_be(&afu->host_map->cmd_room);
+ atomic64_set(&afu->room, room);
+ if (room)
+ goto write_rrin;
+ udelay(nretry);
+ } while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ pr_err("%s: no cmd_room to send reset\n", __func__);
+ return;
+
+write_rrin:
+ nretry = 0;
+ writeq_be(rrin, &afu->host_map->ioarrin);
+ do {
+ rrin = readq_be(&afu->host_map->ioarrin);
+ if (rrin != 0x1)
+ break;
+ /* Double delay each time */
+ udelay(2 << nretry);
+ } while (nretry++ < MC_ROOM_RETRY_CNT);
+}
+
+/**
+ * send_cmd() - sends an AFU command
+ * @afu: AFU associated with the host.
+ * @cmd: AFU command to send.
+ *
+ * Return:
+ * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ */
+static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
+ int nretry = 0;
+ int rc = 0;
+ u64 room;
+ long newval;
+
+ /*
+ * This routine is used by critical users such as an AFU sync and to
+ * send a task management function (TMF). Thus we want to retry a
+ * bit before returning an error. To avoid the performance penalty
+ * of MMIO, we spread the update of 'room' over multiple commands.
+ */
+retry:
+ newval = atomic64_dec_if_positive(&afu->room);
+ if (!newval) {
+ do {
+ room = readq_be(&afu->host_map->cmd_room);
+ atomic64_set(&afu->room, room);
+ if (room)
+ goto write_ioarrin;
+ udelay(nretry);
+ } while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ dev_err(dev, "%s: no cmd_room to send 0x%X\n",
+ __func__, cmd->rcb.cdb[0]);
+
+ goto no_room;
+ } else if (unlikely(newval < 0)) {
+ /* This should be rare. i.e. Only if two threads race and
+ * decrement before the MMIO read is done. In this case
+ * just benefit from the other thread having updated
+ * afu->room.
+ */
+ if (nretry++ < MC_ROOM_RETRY_CNT) {
+ udelay(nretry);
+ goto retry;
+ }
+
+ goto no_room;
+ }
+
+write_ioarrin:
+ writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
+out:
+ pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
+ cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+ return rc;
+
+no_room:
+ afu->read_room = true;
+ schedule_work(&cfg->work_q);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+}
+
+/**
+ * wait_resp() - polls for a response or timeout to a sent AFU command
+ * @afu: AFU associated with the host.
+ * @cmd: AFU command that was sent.
+ */
+static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+{
+ ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
+
+ timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
+ if (!timeout)
+ context_reset(cmd);
+
+ if (unlikely(cmd->sa.ioasc != 0))
+ pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
+ "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
+ cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
+ cmd->sa.rc.fc_rc);
+}
+
+/**
* send_tmf() - sends a Task Management Function (TMF)
* @afu: AFU to checkout from.
* @scp: SCSI command from stack.
* @tmfcmd: TMF command to send.
*
* Return:
- * 0 on success
- * SCSI_MLQUEUE_HOST_BUSY when host is busy
+ * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
*/
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
@@ -277,25 +410,27 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
short lflag = 0;
struct Scsi_Host *host = scp->device->host;
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct device *dev = &cfg->dev->dev;
ulong lock_flags;
int rc = 0;
+ ulong to;
- cmd = cxlflash_cmd_checkout(afu);
+ cmd = cmd_checkout(afu);
if (unlikely(!cmd)) {
- pr_err("%s: could not get a free command\n", __func__);
+ dev_err(dev, "%s: could not get a free command\n", __func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
- /* If a Task Management Function is active, do not send one more.
- */
- spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ /* When Task Management Function is active do not send another */
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
- wait_event_interruptible_locked_irq(cfg->tmf_waitq,
- !cfg->tmf_active);
+ wait_event_interruptible_lock_irq(cfg->tmf_waitq,
+ !cfg->tmf_active,
+ cfg->tmf_slock);
cfg->tmf_active = true;
cmd->cmd_tmf = true;
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
cmd->rcb.ctx_id = afu->ctx_hndl;
cmd->rcb.port_sel = port_sel;
@@ -313,18 +448,27 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
/* Send the command */
- rc = cxlflash_send_cmd(afu, cmd);
+ rc = send_cmd(afu, cmd);
if (unlikely(rc)) {
- cxlflash_cmd_checkin(cmd);
- spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ cmd_checkin(cmd);
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
goto out;
}
- spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
- wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+ to = msecs_to_jiffies(5000);
+ to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
+ !cfg->tmf_active,
+ cfg->tmf_slock,
+ to);
+ if (!to) {
+ cfg->tmf_active = false;
+ dev_err(dev, "%s: TMF timed out!\n", __func__);
+ rc = -1;
+ }
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
return rc;
}
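send_cmd() above spells out its throttling scheme: cache the adapter's cmd_room count in an atomic, spend one unit per submission, and only pay for the MMIO read when the cache runs dry, retrying a bounded number of times before falling back to SCSI_MLQUEUE_HOST_BUSY. A loose, single-threaded userspace model of that idea, with hypothetical names (claim_slot(), read_cmd_room()) and none of the driver's locking or worker-thread details:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ROOM_RETRY_CNT 10

/* Pretend MMIO register: how many command slots the device has free. */
static uint64_t fake_cmd_room_reg = 3;
static uint64_t read_cmd_room(void) { return fake_cmd_room_reg; }
static void device_consume_slot(void) { if (fake_cmd_room_reg) fake_cmd_room_reg--; }

/* Cached copy of the register, spent one unit per submitted command. */
static _Atomic int64_t cached_room;

/* Returns 0 when a slot was claimed, -1 when the device stayed full. */
static int claim_slot(void)
{
        int64_t newval = atomic_fetch_sub(&cached_room, 1) - 1;

        if (newval >= 0)
                return 0;               /* rode the cached count, no MMIO */

        for (int retry = 0; retry < DEMO_ROOM_RETRY_CNT; retry++) {
                uint64_t room = read_cmd_room();        /* the costly read */
                if (room) {
                        atomic_store(&cached_room, (int64_t)room - 1);
                        return 0;
                }
        }
        return -1;                      /* maps to SCSI_MLQUEUE_HOST_BUSY */
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                if (claim_slot() == 0) {
                        device_consume_slot();
                        printf("cmd %d sent\n", i);
                } else {
                        printf("cmd %d -> host busy\n", i);
                }
        }
        return 0;
}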
@@ -345,15 +489,13 @@ static const char *cxlflash_driver_info(struct Scsi_Host *host)
* @host: SCSI host associated with device.
* @scp: SCSI command to send.
*
- * Return:
- * 0 on success
- * SCSI_MLQUEUE_HOST_BUSY when host is busy
+ * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
*/
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
struct afu *afu = cfg->afu;
- struct pci_dev *pdev = cfg->dev;
+ struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd;
u32 port_sel = scp->device->channel + 1;
int nseg, i, ncount;
@@ -362,34 +504,34 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
short lflag = 0;
int rc = 0;
- pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
- __func__, scp, host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+ dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08X-%08X-%08X-%08X)\n",
+ __func__, scp, host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
- /* If a Task Management Function is active, wait for it to complete
+ /*
+ * If a Task Management Function is active, wait for it to complete
* before continuing with regular commands.
*/
- spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active) {
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
switch (cfg->state) {
- case STATE_LIMBO:
- dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
- __func__);
+ case STATE_RESET:
+ dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
case STATE_FAILTERM:
- dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
- __func__);
+ dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
scp->result = (DID_NO_CONNECT << 16);
scp->scsi_done(scp);
rc = 0;
@@ -398,9 +540,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
break;
}
- cmd = cxlflash_cmd_checkout(afu);
+ cmd = cmd_checkout(afu);
if (unlikely(!cmd)) {
- pr_err("%s: could not get a free command\n", __func__);
+ dev_err(dev, "%s: could not get a free command\n", __func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -422,7 +564,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
nseg = scsi_dma_map(scp);
if (unlikely(nseg < 0)) {
- dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
+ dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
__func__, nseg);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
@@ -438,347 +580,34 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
/* Send the command */
- rc = cxlflash_send_cmd(afu, cmd);
+ rc = send_cmd(afu, cmd);
if (unlikely(rc)) {
- cxlflash_cmd_checkin(cmd);
+ cmd_checkin(cmd);
scsi_dma_unmap(scp);
}
out:
+ pr_devel("%s: returning rc=%d\n", __func__, rc);
return rc;
}
/**
- * cxlflash_eh_device_reset_handler() - reset a single LUN
- * @scp: SCSI command to send.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
- struct afu *afu = cfg->afu;
- int rcr = 0;
-
- pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
- host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- switch (cfg->state) {
- case STATE_NORMAL:
- rcr = send_tmf(afu, scp, TMF_LUN_RESET);
- if (unlikely(rcr))
- rc = FAILED;
- break;
- case STATE_LIMBO:
- wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
- if (cfg->state == STATE_NORMAL)
- break;
- /* fall through */
- default:
- rc = FAILED;
- break;
- }
-
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_host_reset_handler() - reset the host adapter
- * @scp: SCSI command from stack identifying host.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- int rcr = 0;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
-
- pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
- host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_LIMBO;
- scsi_block_requests(cfg->host);
- cxlflash_mark_contexts_error(cfg);
- rcr = cxlflash_afu_reset(cfg);
- if (rcr) {
- rc = FAILED;
- cfg->state = STATE_FAILTERM;
- } else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->limbo_waitq);
- scsi_unblock_requests(cfg->host);
- break;
- case STATE_LIMBO:
- wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
- if (cfg->state == STATE_NORMAL)
- break;
- /* fall through */
- default:
- rc = FAILED;
- break;
- }
-
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_change_queue_depth() - change the queue depth for the device
- * @sdev: SCSI device destined for queue depth change.
- * @qdepth: Requested queue depth value to set.
- *
- * The requested queue depth is capped to the maximum supported value.
- *
- * Return: The actual queue depth set.
- */
-static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
-{
-
- if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
- qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
-
- scsi_change_queue_depth(sdev, qdepth);
- return sdev->queue_depth;
-}
-
-/**
- * cxlflash_show_port_status() - queries and presents the current port status
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_port_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
- struct afu *afu = cfg->afu;
-
- char *disp_status;
- int rc;
- u32 port;
- u64 status;
- u64 *fc_regs;
-
- rc = kstrtouint((attr->attr.name + 4), 10, &port);
- if (rc || (port >= NUM_FC_PORTS))
- return 0;
-
- fc_regs = &afu->afu_map->global.fc_regs[port][0];
- status =
- (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);
-
- if (status == FC_MTIP_STATUS_ONLINE)
- disp_status = "online";
- else if (status == FC_MTIP_STATUS_OFFLINE)
- disp_status = "offline";
- else
- disp_status = "unknown";
-
- return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
-}
-
-/**
- * cxlflash_show_lun_mode() - presents the current LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the lun mode.
- * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_lun_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
- struct afu *afu = cfg->afu;
-
- return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
-}
-
-/**
- * cxlflash_store_lun_mode() - sets the LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the lun mode.
- * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
- * @count: Length of data resizing in @buf.
- *
- * The CXL Flash AFU supports a dummy LUN mode where the external
- * links and storage are not required. Space on the FPGA is used
- * to create 1 or 2 small LUNs which are presented to the system
- * as if they were a normal storage device. This feature is useful
- * during development and also provides manufacturing with a way
- * to test the AFU without an actual device.
- *
- * 0 = external LUN[s] (default)
- * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
- * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
- * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
- * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_store_lun_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
- struct afu *afu = cfg->afu;
- int rc;
- u32 lun_mode;
-
- rc = kstrtouint(buf, 10, &lun_mode);
- if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
- afu->internal_lun = lun_mode;
- cxlflash_afu_reset(cfg);
- scsi_scan_host(cfg->host);
- }
-
- return count;
-}
-
-/**
- * cxlflash_show_ioctl_version() - presents the current ioctl version of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the ioctl version.
- * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_ioctl_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
-}
-
-/**
- * cxlflash_show_dev_mode() - presents the current mode of the device
- * @dev: Generic device associated with the device.
- * @attr: Device attribute representing the device mode.
- * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_dev_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- sdev->hostdata ? "superpipe" : "legacy");
-}
-
-/**
* cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*/
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
struct pci_dev *pdev = cfg->dev;
if (pci_channel_offline(pdev))
- wait_event_timeout(cfg->limbo_waitq,
+ wait_event_timeout(cfg->reset_waitq,
!pci_channel_offline(pdev),
CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
-/*
- * Host attributes
- */
-static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
-static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
-static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
- cxlflash_store_lun_mode);
-static DEVICE_ATTR(ioctl_version, S_IRUGO, cxlflash_show_ioctl_version, NULL);
-
-static struct device_attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0,
- &dev_attr_port1,
- &dev_attr_lun_mode,
- &dev_attr_ioctl_version,
- NULL
-};
-
-/*
- * Device attributes
- */
-static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);
-
-static struct device_attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode,
- NULL
-};
-
-/*
- * Host template
- */
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = CXLFLASH_ADAPTER_NAME,
- .info = cxlflash_driver_info,
- .ioctl = cxlflash_ioctl,
- .proc_name = CXLFLASH_NAME,
- .queuecommand = cxlflash_queuecommand,
- .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
- .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
- .change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = 16,
- .can_queue = CXLFLASH_MAX_CMDS,
- .this_id = -1,
- .sg_tablesize = SG_NONE, /* No scatter gather support. */
- .max_sectors = CXLFLASH_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
- .shost_attrs = cxlflash_host_attrs,
- .sdev_attrs = cxlflash_dev_attrs,
-};
-
-/*
- * Device dependent values
- */
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
-
-/*
- * PCI device binding table
- */
-static struct pci_device_id cxlflash_pci_table[] = {
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
-
/**
* free_mem() - free memory associated with the AFU
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*/
static void free_mem(struct cxlflash_cfg *cfg)
{
@@ -800,7 +629,7 @@ static void free_mem(struct cxlflash_cfg *cfg)
/**
* stop_afu() - stops the AFU command timers and unmaps the MMIO space
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
* Safe to call with AFU in a partially allocated/initialized state.
*/
@@ -814,7 +643,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
complete(&afu->cmd[i].cevent);
if (likely(afu->afu_map)) {
- cxl_psa_unmap((void *)afu->afu_map);
+ cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
}
}
@@ -822,7 +651,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
/**
* term_mc() - terminates the master context
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
* @level: Depth of allocation, where to begin waterfall tear down.
*
* Safe to call with AFU/MC in partially allocated/initialized state.
@@ -831,9 +660,10 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
int rc = 0;
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
if (!afu || !cfg->mcctx) {
- pr_err("%s: returning from term_mc with NULL afu or MC\n",
+ dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
__func__);
return;
}
@@ -857,7 +687,7 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
/**
* term_afu() - terminates the AFU
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
* Safe to call with AFU/MC in partially allocated/initialized state.
*/
@@ -885,11 +715,12 @@ static void cxlflash_remove(struct pci_dev *pdev)
/* If a Task Management Function is active, wait for it to complete
* before continuing with remove.
*/
- spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
- wait_event_interruptible_locked_irq(cfg->tmf_waitq,
- !cfg->tmf_active);
- spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ wait_event_interruptible_lock_irq(cfg->tmf_waitq,
+ !cfg->tmf_active,
+ cfg->tmf_slock);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
cfg->state = STATE_FAILTERM;
cxlflash_stop_term_user_contexts(cfg);
@@ -898,16 +729,16 @@ static void cxlflash_remove(struct pci_dev *pdev)
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
- scsi_host_put(cfg->host);
- /* Fall through */
+ /* fall through */
case INIT_STATE_AFU:
term_afu(cfg);
+ cancel_work_sync(&cfg->work_q);
case INIT_STATE_PCI:
pci_release_regions(cfg->dev);
pci_disable_device(pdev);
case INIT_STATE_NONE:
- flush_work(&cfg->work_q);
free_mem(cfg);
+ scsi_host_put(cfg->host);
break;
}
@@ -916,7 +747,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
/**
* alloc_mem() - allocates the AFU and its command pool
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
* A partially allocated state remains on failure.
*
@@ -929,15 +760,14 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
int rc = 0;
int i;
char *buf = NULL;
+ struct device *dev = &cfg->dev->dev;
- /* This allocation is about 12K, i.e. only 1 64k page
- * and upto 4 4k pages
- */
+ /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(sizeof(struct afu)));
if (unlikely(!cfg->afu)) {
- pr_err("%s: cannot get %d free pages\n",
- __func__, get_order(sizeof(struct afu)));
+ dev_err(dev, "%s: cannot get %d free pages\n",
+ __func__, get_order(sizeof(struct afu)));
rc = -ENOMEM;
goto out;
}
@@ -948,7 +778,8 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
if (!((u64)buf & (PAGE_SIZE - 1))) {
buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (unlikely(!buf)) {
- pr_err("%s: Allocate command buffers fail!\n",
+ dev_err(dev,
+ "%s: Allocate command buffers fail!\n",
__func__);
rc = -ENOMEM;
free_mem(cfg);
@@ -967,12 +798,9 @@ out:
/**
* init_pci() - initializes the host as a PCI device
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
- * Return:
- * 0 on success
- * -EIO on unable to communicate with device
- * A return code from the PCI sub-routines
+ * Return: 0 on success, -errno on failure
*/
static int init_pci(struct cxlflash_cfg *cfg)
{
@@ -1052,11 +880,9 @@ out_release_regions:
/**
* init_scsi() - adds the host to the SCSI stack and kicks off host scan
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
- * Return:
- * 0 on success
- * A return code from adding the host
+ * Return: 0 on success, -errno on failure
*/
static int init_scsi(struct cxlflash_cfg *cfg)
{
@@ -1085,7 +911,7 @@ out:
* that the FC link layer has synced, completed the handshaking process, and
* is ready for login to start.
*/
-static void set_port_online(u64 *fc_regs)
+static void set_port_online(__be64 __iomem *fc_regs)
{
u64 cmdcfg;
@@ -1101,7 +927,7 @@ static void set_port_online(u64 *fc_regs)
*
* The provided MMIO region must be mapped prior to call.
*/
-static void set_port_offline(u64 *fc_regs)
+static void set_port_offline(__be64 __iomem *fc_regs)
{
u64 cmdcfg;
@@ -1125,7 +951,7 @@ static void set_port_offline(u64 *fc_regs)
* FALSE (0) when the specified port fails to come online after timeout
* -EINVAL when @delay_us is less than 1000
*/
-static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
+static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
u64 status;
@@ -1156,7 +982,7 @@ static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
* FALSE (0) when the specified port fails to go offline after timeout
* -EINVAL when @delay_us is less than 1000
*/
-static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
+static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
u64 status;
@@ -1191,9 +1017,10 @@ static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
* 0 when the WWPN is successfully written and the port comes back online
* -1 when the port fails to go offline or come back up online
*/
-static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
+static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
+ u64 wwpn)
{
- int ret = 0;
+ int rc = 0;
set_port_offline(fc_regs);
@@ -1201,33 +1028,26 @@ static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
FC_PORT_STATUS_RETRY_CNT)) {
pr_debug("%s: wait on port %d to go offline timed out\n",
__func__, port);
- ret = -1; /* but continue on to leave the port back online */
+ rc = -1; /* but continue on to leave the port back online */
}
- if (ret == 0)
+ if (rc == 0)
writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
+ /* Always return success after programming WWPN */
+ rc = 0;
+
set_port_online(fc_regs);
if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT)) {
- pr_debug("%s: wait on port %d to go online timed out\n",
- __func__, port);
- ret = -1;
-
- /*
- * Override for internal lun!!!
- */
- if (afu->internal_lun) {
- pr_debug("%s: Overriding port %d online timeout!!!\n",
- __func__, port);
- ret = 0;
- }
+ pr_err("%s: wait on port %d to go online timed out\n",
+ __func__, port);
}
- pr_debug("%s: returning rc=%d\n", __func__, ret);
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
- return ret;
+ return rc;
}
/**
@@ -1243,7 +1063,7 @@ static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
* the alternate port exclusively while the reset takes place.
* failure to come online is overridden.
*/
-static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
+static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
u64 port_sel;
@@ -1280,19 +1100,19 @@ static const struct asyc_intr_info ainfo[] = {
{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
- {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
+ {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
- {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
+ {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
- {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
+ {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
- {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
+ {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
- {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
+ {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
{0x0, "", 0, 0} /* terminator */
};
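The ainfo[] edits above lean on the existing table-driven decode: each asynchronous interrupt bit maps to a description, a port, and an action mask, so the new SCAN_HOST action simply rides that mechanism. A small self-contained sketch of the same walk-every-set-bit-and-look-it-up pattern, using made-up DEMO_* constants rather than the SISL definitions:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CLR_FC_ERROR       0x1
#define DEMO_LINK_RESET         0x2
#define DEMO_SCAN_HOST          0x4

/* Hypothetical, trimmed-down cousin of the driver's asyc_intr_info table. */
struct demo_intr_info {
        uint64_t    status_bit;
        const char *desc;
        int         port;
        int         action;
};

static const struct demo_intr_info demo_table[] = {
        { 1ull << 0, "link down",    0, 0 },
        { 1ull << 1, "link up",      0, DEMO_SCAN_HOST },
        { 1ull << 2, "login failed", 0, DEMO_CLR_FC_ERROR },
        { 0, NULL, 0, 0 }       /* terminator */
};

static const struct demo_intr_info *find_info(uint64_t bit)
{
        for (const struct demo_intr_info *e = demo_table; e->status_bit; e++)
                if (e->status_bit == bit)
                        return e;
        return NULL;
}

int main(void)
{
        uint64_t unmasked = (1ull << 1) | (1ull << 2);  /* pretend status */

        /* Walk every set bit, low to high, the way the IRQ handler does. */
        for (int i = 0; unmasked; i++, unmasked >>= 1) {
                const struct demo_intr_info *info;

                if ((unmasked & 0x1) == 0)
                        continue;
                info = find_info(1ull << i);
                if (!info)
                        continue;
                printf("port %d: %s (action mask 0x%X)\n",
                       info->port, info->desc, info->action);
        }
        return 0;
}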
@@ -1454,47 +1274,46 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
struct afu *afu = (struct afu *)data;
- struct cxlflash_cfg *cfg;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
u64 reg_unmasked;
const struct asyc_intr_info *info;
- struct sisl_global_map *global = &afu->afu_map->global;
+ struct sisl_global_map __iomem *global = &afu->afu_map->global;
u64 reg;
u8 port;
int i;
- cfg = afu->parent;
-
reg = readq_be(&global->regs.aintr_status);
reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
if (reg_unmasked == 0) {
- pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
- __func__, reg);
+ dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
+ __func__, reg);
goto out;
}
- /* it is OK to clear AFU status before FC_ERROR */
+ /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
writeq_be(reg_unmasked, &global->regs.aintr_clear);
- /* check each bit that is on */
+ /* Check each bit that is on */
for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
info = find_ainfo(1ULL << i);
- if ((reg_unmasked & 0x1) || !info)
+ if (((reg_unmasked & 0x1) == 0) || !info)
continue;
port = info->port;
- pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
- __func__, port, info->desc,
+ dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+ __func__, port, info->desc,
readq_be(&global->fc_regs[port][FC_STATUS / 8]));
/*
- * do link reset first, some OTHER errors will set FC_ERROR
+ * Do link reset first, some OTHER errors will set FC_ERROR
* again if cleared before or w/o a reset
*/
if (info->action & LINK_RESET) {
- pr_err("%s: FC Port %d: resetting link\n",
- __func__, port);
+ dev_err(dev, "%s: FC Port %d: resetting link\n",
+ __func__, port);
cfg->lr_state = LINK_RESET_REQUIRED;
cfg->lr_port = port;
schedule_work(&cfg->work_q);
@@ -1504,26 +1323,31 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
/*
- * since all errors are unmasked, FC_ERROR and FC_ERRCAP
+ * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
* should be the same and tracing one is sufficient.
*/
- pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
- __func__, port, reg);
+ dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
+ __func__, port, reg);
writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
}
+
+ if (info->action & SCAN_HOST) {
+ atomic_inc(&cfg->scan_host_needed);
+ schedule_work(&cfg->work_q);
+ }
}
out:
- pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
+ dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
return IRQ_HANDLED;
}
/**
* start_context() - starts the master context
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
* Return: A success or failure value from CXL services.
*/
@@ -1541,12 +1365,10 @@ static int start_context(struct cxlflash_cfg *cfg)
/**
* read_vpd() - obtains the WWPNs from VPD
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
* @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
*
- * Return:
- * 0 on success
- * -ENODEV when VPD or WWPN keywords not found
+ * Return: 0 on success, -errno on failure
*/
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
@@ -1561,7 +1383,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
/* Get the VPD data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
if (unlikely(vpd_size <= 0)) {
- pr_err("%s: Unable to read VPD (size = %ld)\n",
+ dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
rc = -ENODEV;
goto out;
@@ -1571,7 +1393,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
PCI_VPD_LRDT_RO_DATA);
if (unlikely(ro_start < 0)) {
- pr_err("%s: VPD Read-only data not found\n", __func__);
+ dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
+ __func__);
rc = -ENODEV;
goto out;
}
@@ -1600,8 +1423,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
if (unlikely(i < 0)) {
- pr_err("%s: Port %d WWPN not found in VPD\n",
- __func__, k);
+ dev_err(&dev->dev, "%s: Port %d WWPN not found "
+ "in VPD\n", __func__, k);
rc = -ENODEV;
goto out;
}
@@ -1609,7 +1432,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
j = pci_vpd_info_field_size(&vpd_data[i]);
i += PCI_VPD_INFO_FLD_HDR_SIZE;
if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
- pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
+ dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
+ "VPD corrupt\n",
__func__, k);
rc = -ENODEV;
goto out;
@@ -1618,8 +1442,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
if (unlikely(rc)) {
- pr_err("%s: Fail to convert port %d WWPN to integer\n",
- __func__, k);
+ dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
+ "to integer\n", __func__, k);
rc = -ENODEV;
goto out;
}
@@ -1631,91 +1455,36 @@ out:
}
/**
- * cxlflash_context_reset() - timeout handler for AFU commands
- * @cmd: AFU command that timed out.
- *
- * Sends a reset to the AFU.
- */
-void cxlflash_context_reset(struct afu_cmd *cmd)
-{
- int nretry = 0;
- u64 rrin = 0x1;
- u64 room = 0;
- struct afu *afu = cmd->parent;
- ulong lock_flags;
-
- pr_debug("%s: cmd=%p\n", __func__, cmd);
-
- spin_lock_irqsave(&cmd->slock, lock_flags);
-
- /* Already completed? */
- if (cmd->sa.host_use_b[0] & B_DONE) {
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
- return;
- }
-
- cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- /*
- * We really want to send this reset at all costs, so spread
- * out wait time on successive retries for available room.
- */
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_rrin;
- udelay(nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- pr_err("%s: no cmd_room to send reset\n", __func__);
- return;
-
-write_rrin:
- nretry = 0;
- writeq_be(rrin, &afu->host_map->ioarrin);
- do {
- rrin = readq_be(&afu->host_map->ioarrin);
- if (rrin != 0x1)
- break;
- /* Double delay each time */
- udelay(2 ^ nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-}
-
-/**
* init_pcr() - initialize the provisioning and control registers
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
* Also sets up fast access to the mapped registers and initializes AFU
* command fields that never change.
*/
-void init_pcr(struct cxlflash_cfg *cfg)
+static void init_pcr(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
- struct sisl_ctrl_map *ctrl_map;
+ struct sisl_ctrl_map __iomem *ctrl_map;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
ctrl_map = &afu->afu_map->ctrls[i].ctrl;
- /* disrupt any clients that could be running */
- /* e. g. clients that survived a master restart */
+ /* Disrupt any clients that could be running */
+ /* e.g. clients that survived a master restart */
writeq_be(0, &ctrl_map->rht_start);
writeq_be(0, &ctrl_map->rht_cnt_id);
writeq_be(0, &ctrl_map->ctx_cap);
}
- /* copy frequently used fields into afu */
+ /* Copy frequently used fields into afu */
afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
- /* ctx_hndl is 16 bits in CAIA */
afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
/* Program the Endian Control for the master context */
writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
- /* initialize cmd fields that never change */
+ /* Initialize cmd fields that never change */
for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
@@ -1725,11 +1494,12 @@ void init_pcr(struct cxlflash_cfg *cfg)
/**
* init_global() - initialize AFU global registers
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*/
-int init_global(struct cxlflash_cfg *cfg)
+static int init_global(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
int i = 0, num_ports = 0;
int rc = 0;
@@ -1737,13 +1507,13 @@ int init_global(struct cxlflash_cfg *cfg)
rc = read_vpd(cfg, &wwpn[0]);
if (rc) {
- pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
+ dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
goto out;
}
pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
- /* set up RRQ in AFU for master issued cmds */
+ /* Set up RRQ in AFU for master issued cmds */
writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
@@ -1756,9 +1526,9 @@ int init_global(struct cxlflash_cfg *cfg)
/* checker on if dual afu */
writeq_be(reg, &afu->afu_map->global.regs.afu_config);
- /* global port select: select either port */
+ /* Global port select: select either port */
if (afu->internal_lun) {
- /* only use port 0 */
+ /* Only use port 0 */
writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
num_ports = NUM_FC_PORTS - 1;
} else {
@@ -1767,20 +1537,20 @@ int init_global(struct cxlflash_cfg *cfg)
}
for (i = 0; i < num_ports; i++) {
- /* unmask all errors (but they are still masked at AFU) */
+ /* Unmask all errors (but they are still masked at AFU) */
writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
- /* clear CRC error cnt & set a threshold */
+ /* Clear CRC error cnt & set a threshold */
(void)readq_be(&afu->afu_map->global.
fc_regs[i][FC_CNT_CRCERR / 8]);
writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
[FC_CRC_THRESH / 8]);
- /* set WWPNs. If already programmed, wwpn[i] is 0 */
+ /* Set WWPNs. If already programmed, wwpn[i] is 0 */
if (wwpn[i] != 0 &&
afu_set_wwpn(afu, i,
&afu->afu_map->global.fc_regs[i][0],
wwpn[i])) {
- pr_err("%s: failed to set WWPN on port %d\n",
+ dev_err(dev, "%s: failed to set WWPN on port %d\n",
__func__, i);
rc = -EIO;
goto out;
@@ -1789,18 +1559,17 @@ int init_global(struct cxlflash_cfg *cfg)
* offline/online transitions and a PLOGI
*/
msleep(100);
-
}
- /* set up master's own CTX_CAP to allow real mode, host translation */
- /* tbls, afu cmds and read/write GSCSI cmds. */
+ /* Set up master's own CTX_CAP to allow real mode, host translation */
+ /* tables, afu cmds and read/write GSCSI cmds. */
/* First, unlock ctx_cap write by reading mbox */
(void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
&afu->ctrl_map->ctx_cap);
- /* init heartbeat */
+ /* Initialize heartbeat */
afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
@@ -1809,7 +1578,7 @@ out:
/**
* start_afu() - initializes and starts the AFU
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*/
static int start_afu(struct cxlflash_cfg *cfg)
{
@@ -1829,7 +1598,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
init_pcr(cfg);
- /* initialize RRQ pointers */
+ /* After an AFU reset, RRQ entries are stale, clear them */
+ memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
+
+ /* Initialize RRQ pointers */
afu->hrrq_start = &afu->rrq_entry[0];
afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
afu->hrrq_curr = afu->hrrq_start;
@@ -1843,12 +1615,9 @@ static int start_afu(struct cxlflash_cfg *cfg)
/**
* init_mc() - create and register as the master context
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
- * Return:
- * 0 on success
- * -ENOMEM when unable to obtain a context from CXL services
- * A failure value from CXL services.
+ * Return: 0 on success, -errno on failure
*/
static int init_mc(struct cxlflash_cfg *cfg)
{
@@ -1932,15 +1701,12 @@ out:
/**
* init_afu() - setup as master context and start AFU
- * @cxlflash: Internal structure associated with the host.
+ * @cfg: Internal structure associated with the host.
*
* This routine is a higher level of control for configuring the
* AFU on probe and reset paths.
*
- * Return:
- * 0 on success
- * -ENOMEM when unable to map the AFU MMIO space
- * A failure value from internal services.
+ * Return: 0 on success, -errno on failure
*/
static int init_afu(struct cxlflash_cfg *cfg)
{
@@ -1955,36 +1721,38 @@ static int init_afu(struct cxlflash_cfg *cfg)
if (rc) {
dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
__func__, rc);
- goto err1;
+ goto out;
}
- /* Map the entire MMIO space of the AFU.
- */
+ /* Map the entire MMIO space of the AFU */
afu->afu_map = cxl_psa_map(cfg->mcctx);
if (!afu->afu_map) {
- rc = -ENOMEM;
- term_mc(cfg, UNDO_START);
dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
+ rc = -ENOMEM;
goto err1;
}
- /* don't byte reverse on reading afu_version, else the string form */
- /* will be backwards */
- reg = afu->afu_map->global.regs.afu_version;
- memcpy(afu->version, &reg, 8);
+ /* No byte reverse on reading afu_version or string will be backwards */
+ reg = readq(&afu->afu_map->global.regs.afu_version);
+ memcpy(afu->version, &reg, sizeof(reg));
afu->interface_version =
readq_be(&afu->afu_map->global.regs.interface_version);
- pr_debug("%s: afu version %s, interface version 0x%llX\n",
- __func__, afu->version, afu->interface_version);
+ if ((afu->interface_version + 1) == 0) {
+ pr_err("Back level AFU, please upgrade. AFU version %s "
+ "interface version 0x%llx\n", afu->version,
+ afu->interface_version);
+ rc = -EINVAL;
+ goto err2;
+ }
+
+ pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
+ afu->version, afu->interface_version);
rc = start_afu(cfg);
if (rc) {
dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
__func__, rc);
- term_mc(cfg, UNDO_START);
- cxl_psa_unmap((void *)afu->afu_map);
- afu->afu_map = NULL;
- goto err1;
+ goto err2;
}
afu_err_intr_init(cfg->afu);
@@ -1992,98 +1760,19 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
-err1:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_send_cmd() - sends an AFU command
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success
- * -1 on failure
- */
-int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- int nretry = 0;
- int rc = 0;
- u64 room;
- long newval;
-
- /*
- * This routine is used by critical users such an AFU sync and to
- * send a task management function (TMF). Thus we want to retry a
- * bit before returning an error. To avoid the performance penalty
- * of MMIO, we spread the update of 'room' over multiple commands.
- */
-retry:
- newval = atomic64_dec_if_positive(&afu->room);
- if (!newval) {
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_ioarrin;
- udelay(nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- pr_err("%s: no cmd_room to send 0x%X\n",
- __func__, cmd->rcb.cdb[0]);
-
- goto no_room;
- } else if (unlikely(newval < 0)) {
- /* This should be rare. i.e. Only if two threads race and
- * decrement before the MMIO read is done. In this case
- * just benefit from the other thread having updated
- * afu->room.
- */
- if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(nretry);
- goto retry;
- }
-
- goto no_room;
- }
-
-write_ioarrin:
- writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
- pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
- cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
-no_room:
- afu->read_room = true;
- schedule_work(&cfg->work_q);
- rc = SCSI_MLQUEUE_HOST_BUSY;
+err2:
+ cxl_psa_unmap((void __iomem *)afu->afu_map);
+ afu->afu_map = NULL;
+err1:
+ term_mc(cfg, UNDO_START);
goto out;
}
/**
- * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command
- * @afu: AFU associated with the host.
- * @cmd: AFU command that was sent.
- */
-void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
-{
- ulong timeout = jiffies + (cmd->rcb.timeout * 2 * HZ);
-
- timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- cxlflash_context_reset(cmd);
-
- if (unlikely(cmd->sa.ioasc != 0))
- pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
- "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
- cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
- cmd->sa.rc.fc_rc);
-}
-
-/**
* cxlflash_afu_sync() - builds and sends an AFU sync command
* @afu: AFU associated with the host.
* @ctx_hndl_u: Identifies context requesting sync.
@@ -2091,7 +1780,7 @@ void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
* @mode: Type of sync to issue (lightweight, heavyweight, global).
*
* The AFU can only take 1 sync command at a time. This routine enforces this
- * limitation by using a mutex to provide exlusive access to the AFU during
+ * limitation by using a mutex to provide exclusive access to the AFU during
* the sync. This design point requires calling threads to not be on interrupt
* context due to the possibility of sleeping during concurrent sync operations.
*
@@ -2109,6 +1798,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
res_hndl_t res_hndl_u, u8 mode)
{
struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
int rc = 0;
int retry_cnt = 0;
@@ -2121,13 +1811,13 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
mutex_lock(&sync_active);
retry:
- cmd = cxlflash_cmd_checkout(afu);
+ cmd = cmd_checkout(afu);
if (unlikely(!cmd)) {
retry_cnt++;
udelay(1000 * retry_cnt);
if (retry_cnt < MC_RETRY_CNT)
goto retry;
- pr_err("%s: could not get a free command\n", __func__);
+ dev_err(dev, "%s: could not get a free command\n", __func__);
rc = -1;
goto out;
}
@@ -2147,36 +1837,34 @@ retry:
cmd->rcb.cdb[1] = mode;
/* The cdb is aligned, no unaligned accessors required */
- *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
- *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
+ *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
+ *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
- rc = cxlflash_send_cmd(afu, cmd);
+ rc = send_cmd(afu, cmd);
if (unlikely(rc))
goto out;
- cxlflash_wait_resp(afu, cmd);
+ wait_resp(afu, cmd);
- /* set on timeout */
+ /* Set on timeout */
if (unlikely((cmd->sa.ioasc != 0) ||
(cmd->sa.host_use_b[0] & B_ERROR)))
rc = -1;
out:
mutex_unlock(&sync_active);
if (cmd)
- cxlflash_cmd_checkin(cmd);
+ cmd_checkin(cmd);
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
}
/**
- * cxlflash_afu_reset() - resets the AFU
- * @cxlflash: Internal structure associated with the host.
+ * afu_reset() - resets the AFU
+ * @cfg: Internal structure associated with the host.
*
- * Return:
- * 0 on success
- * A failure value from internal services.
+ * Return: 0 on success, -errno on failure
*/
-int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
+static int afu_reset(struct cxlflash_cfg *cfg)
{
int rc = 0;
/* Stop the context before the reset. Since the context is
@@ -2192,6 +1880,413 @@ int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
}
/**
+ * cxlflash_eh_device_reset_handler() - reset a single LUN
+ * @scp: SCSI command to send.
+ *
+ * Return:
+ * SUCCESS as defined in scsi/scsi.h
+ * FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
+{
+ int rc = SUCCESS;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu *afu = cfg->afu;
+ int rcr = 0;
+
+ pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+ host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+retry:
+ switch (cfg->state) {
+ case STATE_NORMAL:
+ rcr = send_tmf(afu, scp, TMF_LUN_RESET);
+ if (unlikely(rcr))
+ rc = FAILED;
+ break;
+ case STATE_RESET:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+ goto retry;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_eh_host_reset_handler() - reset the host adapter
+ * @scp: SCSI command from stack identifying host.
+ *
+ * Return:
+ * SUCCESS as defined in scsi/scsi.h
+ * FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
+{
+ int rc = SUCCESS;
+ int rcr = 0;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+
+ pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+ host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+ switch (cfg->state) {
+ case STATE_NORMAL:
+ cfg->state = STATE_RESET;
+ cxlflash_mark_contexts_error(cfg);
+ rcr = afu_reset(cfg);
+ if (rcr) {
+ rc = FAILED;
+ cfg->state = STATE_FAILTERM;
+ } else
+ cfg->state = STATE_NORMAL;
+ wake_up_all(&cfg->reset_waitq);
+ break;
+ case STATE_RESET:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+ if (cfg->state == STATE_NORMAL)
+ break;
+ /* fall through */
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_change_queue_depth() - change the queue depth for the device
+ * @sdev: SCSI device destined for queue depth change.
+ * @qdepth: Requested queue depth value to set.
+ *
+ * The requested queue depth is capped to the maximum supported value.
+ *
+ * Return: The actual queue depth set.
+ */
+static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+
+ if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
+ qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
+
+ scsi_change_queue_depth(sdev, qdepth);
+ return sdev->queue_depth;
+}
+
+/**
+ * cxlflash_show_port_status() - queries and presents the current port status
+ * @port: Desired port for status reporting.
+ * @afu: AFU owning the specified port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
+{
+ char *disp_status;
+ u64 status;
+ __be64 __iomem *fc_regs;
+
+ if (port >= NUM_FC_PORTS)
+ return 0;
+
+ fc_regs = &afu->afu_map->global.fc_regs[port][0];
+ status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ status &= FC_MTIP_STATUS_MASK;
+
+ if (status == FC_MTIP_STATUS_ONLINE)
+ disp_status = "online";
+ else if (status == FC_MTIP_STATUS_OFFLINE)
+ disp_status = "offline";
+ else
+ disp_status = "unknown";
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
+}
+
+/**
+ * port0_show() - queries and presents the current status of port 0
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port0_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ return cxlflash_show_port_status(0, afu, buf);
+}
+
+/**
+ * port1_show() - queries and presents the current status of port 1
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port1_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ return cxlflash_show_port_status(1, afu, buf);
+}
+
+/**
+ * lun_mode_show() - presents the current LUN mode of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the LUN mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t lun_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
+}
+
+/**
+ * lun_mode_store() - sets the LUN mode of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the LUN mode.
+ * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
+ * @count:	Length of data residing in @buf.
+ *
+ * The CXL Flash AFU supports a dummy LUN mode where the external
+ * links and storage are not required. Space on the FPGA is used
+ * to create 1 or 2 small LUNs which are presented to the system
+ * as if they were a normal storage device. This feature is useful
+ * during development and also provides manufacturing with a way
+ * to test the AFU without an actual device.
+ *
+ * 0 = external LUN[s] (default)
+ * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
+ * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
+ * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
+ * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
+ *
+ * Return: The number of bytes consumed from @buf (i.e. @count).
+ */
+static ssize_t lun_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+ int rc;
+ u32 lun_mode;
+
+ rc = kstrtouint(buf, 10, &lun_mode);
+ if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
+ afu->internal_lun = lun_mode;
+ afu_reset(cfg);
+ scsi_scan_host(cfg->host);
+ }
+
+ return count;
+}
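The modes listed above are selected through the lun_mode sysfs attribute backed by this store routine. A minimal userspace sketch of exercising it follows; it is illustrative only and not part of this patch, and the sysfs path and host number are assumptions that depend on the system.

#include <fcntl.h>
#include <unistd.h>

/* Illustrative helper: write a mode digit ("0".."4" per the list above)
 * to the lun_mode attribute, e.g. "/sys/class/scsi_host/host0/lun_mode"
 * (the path is an assumption, not taken from this patch). */
static int set_lun_mode(const char *path, char mode)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, &mode, 1);
	close(fd);
	return n == 1 ? 0 : -1;
}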
+
+/**
+ * ioctl_version_show() - presents the current ioctl version of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the ioctl version.
+ * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t ioctl_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
+}
+
+/**
+ * cxlflash_show_port_lun_table() - queries and presents the port LUN table
+ * @port: Desired port for status reporting.
+ * @afu: AFU owning the specified port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_port_lun_table(u32 port,
+ struct afu *afu,
+ char *buf)
+{
+ int i;
+ ssize_t bytes = 0;
+ __be64 __iomem *fc_port;
+
+ if (port >= NUM_FC_PORTS)
+ return 0;
+
+ fc_port = &afu->afu_map->global.fc_port[port][0];
+
+ for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ "%03d: %016llX\n", i, readq_be(&fc_port[i]));
+ return bytes;
+}
+
+/**
+ * port0_lun_table_show() - presents the current LUN table of port 0
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port0_lun_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ return cxlflash_show_port_lun_table(0, afu, buf);
+}
+
+/**
+ * port1_lun_table_show() - presents the current LUN table of port 1
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port1_lun_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ return cxlflash_show_port_lun_table(1, afu, buf);
+}
+
+/**
+ * mode_show() - presents the current mode of the device
+ * @dev: Generic device associated with the device.
+ * @attr: Device attribute representing the device mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ sdev->hostdata ? "superpipe" : "legacy");
+}
+
+/*
+ * Host attributes
+ */
+static DEVICE_ATTR_RO(port0);
+static DEVICE_ATTR_RO(port1);
+static DEVICE_ATTR_RW(lun_mode);
+static DEVICE_ATTR_RO(ioctl_version);
+static DEVICE_ATTR_RO(port0_lun_table);
+static DEVICE_ATTR_RO(port1_lun_table);
+
+static struct device_attribute *cxlflash_host_attrs[] = {
+ &dev_attr_port0,
+ &dev_attr_port1,
+ &dev_attr_lun_mode,
+ &dev_attr_ioctl_version,
+ &dev_attr_port0_lun_table,
+ &dev_attr_port1_lun_table,
+ NULL
+};
+
+/*
+ * Device attributes
+ */
+static DEVICE_ATTR_RO(mode);
+
+static struct device_attribute *cxlflash_dev_attrs[] = {
+ &dev_attr_mode,
+ NULL
+};
+
+/*
+ * Host template
+ */
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = CXLFLASH_ADAPTER_NAME,
+ .info = cxlflash_driver_info,
+ .ioctl = cxlflash_ioctl,
+ .proc_name = CXLFLASH_NAME,
+ .queuecommand = cxlflash_queuecommand,
+ .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
+ .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
+ .change_queue_depth = cxlflash_change_queue_depth,
+ .cmd_per_lun = 16,
+ .can_queue = CXLFLASH_MAX_CMDS,
+ .this_id = -1,
+ .sg_tablesize = SG_NONE, /* No scatter gather support */
+ .max_sectors = CXLFLASH_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = cxlflash_host_attrs,
+ .sdev_attrs = cxlflash_dev_attrs,
+};
+
+/*
+ * Device dependent values
+ */
+static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+
+/*
+ * PCI device binding table
+ */
+static struct pci_device_id cxlflash_pci_table[] = {
+ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
+
+/**
* cxlflash_worker_thread() - work thread handler for the AFU
* @work: Work structure contained within cxlflash associated with host.
*
@@ -2199,12 +2294,14 @@ int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
* - Link reset which cannot be performed on interrupt context due to
* blocking up to a few seconds
* - Read AFU command room
+ * - Rescan the host
*/
static void cxlflash_worker_thread(struct work_struct *work)
{
struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
work_q);
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
int port;
ulong lock_flags;
@@ -2218,15 +2315,15 @@ static void cxlflash_worker_thread(struct work_struct *work)
if (cfg->lr_state == LINK_RESET_REQUIRED) {
port = cfg->lr_port;
if (port < 0)
- pr_err("%s: invalid port index %d\n", __func__, port);
+ dev_err(dev, "%s: invalid port index %d\n",
+ __func__, port);
else {
spin_unlock_irqrestore(cfg->host->host_lock,
lock_flags);
/* The reset can block... */
afu_link_reset(afu, port,
- &afu->afu_map->
- global.fc_regs[port][0]);
+ &afu->afu_map->global.fc_regs[port][0]);
spin_lock_irqsave(cfg->host->host_lock, lock_flags);
}
@@ -2239,6 +2336,9 @@ static void cxlflash_worker_thread(struct work_struct *work)
}
spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
+
+ if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
+ scsi_scan_host(cfg->host);
}
/**
@@ -2246,7 +2346,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
* @pdev: PCI device associated with the host.
* @dev_id: PCI device id associated with device.
*
- * Return: 0 on success / non-zero on failure
+ * Return: 0 on success, -errno on failure
*/
static int cxlflash_probe(struct pci_dev *pdev,
const struct pci_device_id *dev_id)
@@ -2281,14 +2381,16 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->host = host;
rc = alloc_mem(cfg);
if (rc) {
- dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
+ dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
__func__);
rc = -ENOMEM;
+ scsi_host_put(cfg->host);
goto out;
}
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->cxl_fops = cxlflash_cxl_fops;
/*
* The promoted LUNs move to the top of the LUN table. The rest stay
@@ -2301,28 +2403,30 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
cfg->dev_id = (struct pci_device_id *)dev_id;
- cfg->mcctx = NULL;
init_waitqueue_head(&cfg->tmf_waitq);
- init_waitqueue_head(&cfg->limbo_waitq);
+ init_waitqueue_head(&cfg->reset_waitq);
INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
cfg->lr_state = LINK_RESET_INVALID;
cfg->lr_port = -1;
+ spin_lock_init(&cfg->tmf_slock);
mutex_init(&cfg->ctx_tbl_list_mutex);
mutex_init(&cfg->ctx_recovery_mutex);
+ init_rwsem(&cfg->ioctl_rwsem);
INIT_LIST_HEAD(&cfg->ctx_err_recovery);
INIT_LIST_HEAD(&cfg->lluns);
pci_set_drvdata(pdev, cfg);
- /* Use the special service provided to look up the physical
+ /*
+ * Use the special service provided to look up the physical
* PCI device, since we are called on the probe of the virtual
* PCI host bus (vphb)
*/
phys_dev = cxl_get_phys_dev(pdev);
if (!dev_is_pci(phys_dev)) {
- pr_err("%s: not a pci dev\n", __func__);
+ dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
rc = -ENODEV;
goto out_remove;
}
@@ -2346,7 +2450,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
}
cfg->init_state = INIT_STATE_AFU;
-
rc = init_scsi(cfg);
if (rc) {
dev_err(&pdev->dev, "%s: call to init_scsi "
@@ -2365,6 +2468,19 @@ out_remove:
}
/**
+ * drain_ioctls() - wait until all currently executing ioctls have completed
+ * @cfg: Internal structure associated with the host.
+ *
+ * Obtain write access to read/write semaphore that wraps ioctl
+ * handling to 'drain' ioctls currently executing.
+ */
+static void drain_ioctls(struct cxlflash_cfg *cfg)
+{
+ down_write(&cfg->ioctl_rwsem);
+ up_write(&cfg->ioctl_rwsem);
+}
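The two-line body above is the entire drain: acquiring the write semaphore cannot succeed until every reader has released it. A minimal sketch of the same idiom, separate from the driver and using only the generic rwsem API, might look like this (names are illustrative):

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_rwsem);

/* Readers bracket their (possibly lengthy) work with the read lock. */
static void example_reader(void)
{
	down_read(&example_rwsem);
	/* ... work that must finish before a drain completes ... */
	up_read(&example_rwsem);
}

/* The drainer acquires and immediately releases the write lock; the
 * acquisition blocks until all in-flight readers are done. */
static void example_drain(void)
{
	down_write(&example_rwsem);
	up_write(&example_rwsem);
}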
+
+/**
* cxlflash_pci_error_detected() - called when a PCI error is detected
* @pdev: PCI device struct.
* @state: PCI channel state.
@@ -2382,21 +2498,19 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
switch (state) {
case pci_channel_io_frozen:
- cfg->state = STATE_LIMBO;
-
- /* Turn off legacy I/O */
+ cfg->state = STATE_RESET;
scsi_block_requests(cfg->host);
+ drain_ioctls(cfg);
rc = cxlflash_mark_contexts_error(cfg);
if (unlikely(rc))
dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
__func__, rc);
term_mc(cfg, UNDO_START);
stop_afu(cfg);
-
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
cfg->state = STATE_FAILTERM;
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
scsi_unblock_requests(cfg->host);
return PCI_ERS_RESULT_DISCONNECT;
default:
@@ -2443,7 +2557,7 @@ static void cxlflash_pci_resume(struct pci_dev *pdev)
dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
scsi_unblock_requests(cfg->host);
}
@@ -2467,7 +2581,7 @@ static struct pci_driver cxlflash_driver = {
/**
* init_cxlflash() - module entry point
*
- * Return: 0 on success / non-zero on failure
+ * Return: 0 on success, -errno on failure
*/
static int __init init_cxlflash(void)
{
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index cf0e80938b13..60324566c14f 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -99,6 +99,7 @@ struct asyc_intr_info {
u8 action;
#define CLR_FC_ERROR 0x01
#define LINK_RESET 0x02
+#define SCAN_HOST 0x04
};
#ifndef CONFIG_CXL_EEH
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 63bf394fe78c..0b3366f5e6f6 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -146,7 +146,7 @@ struct sisl_rc {
#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
- reported len, possbly due to dropped
+ reported len, possibly due to dropped
frames */
#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
};
@@ -258,7 +258,7 @@ struct sisl_host_map {
__be64 rrq_start; /* start & end are both inclusive */
__be64 rrq_end; /* write sequence: start followed by end */
__be64 cmd_room;
- __be64 ctx_ctrl; /* least signiifcant byte or b56:63 is LISN# */
+ __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
__be64 mbox_w; /* restricted use */
};
@@ -290,7 +290,7 @@ struct sisl_global_regs {
#define SISL_ASTATUS_FC0_LOGO 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
while logged in */
#define SISL_ASTATUS_FC0_CRC_T 0x2000ULL /* b50, CRC threshold exceeded */
-#define SISL_ASTATUS_FC0_LOGI_R 0x1000ULL /* b51, login state mechine timed out
+#define SISL_ASTATUS_FC0_LOGI_R 0x1000ULL /* b51, login state machine timed out
and retrying */
#define SISL_ASTATUS_FC0_LOGI_F 0x0800ULL /* b52, login failed,
FC_ERROR[19:0] */
@@ -340,7 +340,7 @@ struct sisl_global_regs {
#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
__be64 afu_config;
__be64 rsvd[0xf8];
- __be64 afu_version;
+ __le64 afu_version;
__be64 interface_version;
};
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index f1b62cea75b1..cac2e6a50efd 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -76,7 +76,7 @@ void cxlflash_free_errpage(void)
*
* When the host needs to go down, all users must be quiesced and their
* memory freed. This is accomplished by putting the contexts in error
- * state which will notify the user and let them 'drive' the tear-down.
+ * state which will notify the user and let them 'drive' the tear down.
* Meanwhile, this routine camps until all user contexts have been removed.
*/
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
@@ -100,7 +100,7 @@ void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
__func__);
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
ssleep(1);
}
}
@@ -162,10 +162,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
if (likely(ctxid < MAX_CONTEXT)) {
while (true) {
- rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
- if (rc)
- goto out;
-
+ mutex_lock(&cfg->ctx_tbl_list_mutex);
ctxi = cfg->ctx_tbl[ctxid];
if (ctxi)
if ((file && (ctxi->file != file)) ||
@@ -253,7 +250,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
- struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
+ struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
int rc = 0;
u64 val;
@@ -283,6 +280,24 @@ out:
* @sdev: SCSI device associated with LUN.
* @lli: LUN destined for capacity request.
*
+ * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
+ * in scsi_execute(), the EEH handler will attempt to recover. As part of the
+ * recovery, the handler drains all currently running ioctls, waiting until they
+ * have completed before proceeding with a reset. As this routine is used on the
+ * ioctl path, this can create a condition where the EEH handler becomes stuck,
+ * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
+ * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
+ * This will allow the EEH handler to proceed with a recovery while this thread
+ * is still running. Once the scsi_execute() returns, reacquire the ioctl read
+ * semaphore and check the adapter state in case it changed while inside of
+ * scsi_execute(). The state check will wait if the adapter is still being
+ * recovered or return a failure if the recovery failed. In the event that the
+ * adapter reset failed, simply return the failure as the ioctl would be unable
+ * to continue.
+ *
+ * Note that the above puts a requirement on this routine to only be called on
+ * an ioctl thread.
+ *
* Return: 0 on success, -errno on failure
*/
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
@@ -296,7 +311,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
int rc = 0;
int result = 0;
int retry_cnt = 0;
- u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);
+ u32 to = CMD_TIMEOUT * HZ;
retry:
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
@@ -314,8 +329,18 @@ retry:
dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
retry_cnt ? "re" : "", scsi_cmd[0]);
+ /* Drop the ioctl read semaphore across lengthy call */
+ up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+ CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
+ down_read(&cfg->ioctl_rwsem);
+ rc = check_state(cfg);
+ if (rc) {
+ dev_err(dev, "%s: Failed state! result=0x%08X\n",
+ __func__, result);
+ rc = -ENODEV;
+ goto out;
+ }
if (driver_byte(result) == DRIVER_SENSE) {
result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
@@ -365,8 +390,8 @@ retry:
* as the buffer is allocated on an aligned boundary.
*/
mutex_lock(&gli->mutex);
- gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
- gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
+ gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
+ gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
mutex_unlock(&gli->mutex);
out:
@@ -712,7 +737,6 @@ static void destroy_context(struct cxlflash_cfg *cfg,
kfree(ctxi->rht_needs_ws);
kfree(ctxi->rht_lun);
kfree(ctxi);
- atomic_dec_if_positive(&cfg->num_user_contexts);
}
/**
@@ -737,7 +761,7 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
struct afu *afu = cfg->afu;
struct ctx_info *ctxi = NULL;
struct llun_info **lli = NULL;
- bool *ws = NULL;
+ u8 *ws = NULL;
struct sisl_rht_entry *rhte;
ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
@@ -769,7 +793,6 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
INIT_LIST_HEAD(&ctxi->luns);
INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
- atomic_inc(&cfg->num_user_contexts);
mutex_lock(&ctxi->mutex);
out:
return ctxi;
@@ -880,6 +903,9 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
sys_close(lfd);
}
+ /* Release the sdev reference that bound this LUN to the context */
+ scsi_device_put(sdev);
+
out:
if (put_ctx)
put_context(ctxi);
@@ -1161,10 +1187,7 @@ out:
return rc;
}
-/*
- * Local fops for adapter file descriptor
- */
-static const struct file_operations cxlflash_cxl_fops = {
+const struct file_operations cxlflash_cxl_fops = {
.owner = THIS_MODULE,
.mmap = cxlflash_cxl_mmap,
.release = cxlflash_cxl_release,
@@ -1211,6 +1234,46 @@ static const struct file_operations null_fops = {
};
/**
+ * check_state() - checks and responds to the current adapter state
+ * @cfg: Internal structure associated with the host.
+ *
+ * This routine can block and should only be used in process context.
+ * It assumes that the caller is an ioctl thread holding the ioctl
+ * read semaphore, which is temporarily released across the wait so
+ * that actively running ioctls can drain. Also note that when waking
+ * up from waiting in reset, the state is unknown and must be checked
+ * again before proceeding.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int check_state(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+
+retry:
+ switch (cfg->state) {
+ case STATE_RESET:
+ dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
+ up_read(&cfg->ioctl_rwsem);
+ rc = wait_event_interruptible(cfg->reset_waitq,
+ cfg->state != STATE_RESET);
+ down_read(&cfg->ioctl_rwsem);
+ if (unlikely(rc))
+ break;
+ goto retry;
+ case STATE_FAILTERM:
+ dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
+ rc = -ENODEV;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
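The calling convention described in the prolog can be summarized with a short sketch (illustrative only, not part of this patch): an ioctl-path helper enters with the read semaphore held, drops it across a lengthy operation, re-takes it, and then revalidates the adapter state before touching hardware again.

/* Illustrative only: the shape of an ioctl-path caller of check_state().
 * The caller is assumed to already hold down_read(&cfg->ioctl_rwsem). */
static int example_ioctl_helper(struct cxlflash_cfg *cfg)
{
	int rc;

	up_read(&cfg->ioctl_rwsem);	/* let an EEH drain proceed */
	/* ... lengthy operation that may sleep, e.g. scsi_execute() ... */
	down_read(&cfg->ioctl_rwsem);

	rc = check_state(cfg);		/* adapter may have reset meanwhile */
	return rc;			/* 0 on success, -errno otherwise */
}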
+
+/**
* cxlflash_disk_attach() - attach a LUN to a context
* @sdev: SCSI device associated with LUN.
* @attach: Attach ioctl data structure.
@@ -1243,10 +1306,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
int fd = -1;
- /* On first attach set fileops */
- if (atomic_read(&cfg->num_user_contexts) == 0)
- cfg->cxl_fops = cxlflash_cxl_fops;
-
if (attach->num_interrupts > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
__func__, attach->num_interrupts);
@@ -1287,11 +1346,17 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
}
}
+ rc = scsi_device_get(sdev);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
+ goto out;
+ }
+
lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
if (unlikely(!lun_access)) {
dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
rc = -ENOMEM;
- goto out;
+ goto err0;
}
lun_access->lli = lli;
@@ -1311,21 +1376,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
rc = -ENODEV;
- goto err0;
+ goto err1;
}
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
- goto err1;
+ goto err2;
}
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err1;
+ goto err2;
}
/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
@@ -1335,7 +1400,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
if (unlikely(!ctxi)) {
dev_err(dev, "%s: Failed to create context! (%d)\n",
__func__, ctxid);
- goto err2;
+ goto err3;
}
work = &ctxi->work;
@@ -1346,13 +1411,13 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
- goto err3;
+ goto err4;
}
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err4;
+ goto err5;
}
/*
@@ -1375,7 +1440,8 @@ out_attach:
attach->block_size = gli->blk_len;
attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
attach->last_lba = gli->max_lba;
- attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;
+ attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
+ attach->max_xfer /= gli->blk_len;
out:
attach->adap_fd = fd;
@@ -1387,13 +1453,13 @@ out:
__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
return rc;
-err4:
+err5:
cxl_stop_context(ctx);
-err3:
+err4:
put_context(ctxi);
destroy_context(cfg, ctxi);
ctxi = NULL;
-err2:
+err3:
/*
* Here, we're overriding the fops with a dummy all-NULL fops because
* fput() calls the release fop, which will cause us to mistakenly
@@ -1405,10 +1471,12 @@ err2:
fput(file);
put_unused_fd(fd);
fd = -1;
-err1:
+err2:
cxl_release_context(ctx);
-err0:
+err1:
kfree(lun_access);
+err0:
+ scsi_device_put(sdev);
goto out;
}
@@ -1511,41 +1579,6 @@ err1:
}
/**
- * check_state() - checks and responds to the current adapter state
- * @cfg: Internal structure associated with the host.
- *
- * This routine can block and should only be used on process context.
- * Note that when waking up from waiting in limbo, the state is unknown
- * and must be checked again before proceeding.
- *
- * Return: 0 on success, -errno on failure
- */
-static int check_state(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-retry:
- switch (cfg->state) {
- case STATE_LIMBO:
- dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
- rc = wait_event_interruptible(cfg->limbo_waitq,
- cfg->state != STATE_LIMBO);
- if (unlikely(rc))
- break;
- goto retry;
- case STATE_FAILTERM:
- dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
- rc = -ENODEV;
- break;
- default:
- break;
- }
-
- return rc;
-}
-
-/**
* cxlflash_afu_recover() - initiates AFU recovery
* @sdev: SCSI device associated with LUN.
* @recover: Recover ioctl data structure.
@@ -1561,10 +1594,10 @@ retry:
* quite possible for this routine to act as the kernel's EEH detection
* source (MMIO read of mbox_r). Because of this, there is a window of
* time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter limbo state). To avoid
+ * (callback invoked, causing the device to enter reset state). To avoid
* looping in this routine during that window, a 1 second sleep is in place
* between the time the MMIO failure is detected and the time a wait on the
- * limbo wait queue is attempted via check_state().
+ * reset wait queue is attempted via check_state().
*
* Return: 0 on success, -errno on failure
*/
@@ -1634,9 +1667,14 @@ retry_recover:
/* Test if in error state */
reg = readq_be(&afu->ctrl_map->mbox_r);
if (reg == -1) {
- dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
- __func__);
- mutex_unlock(&ctxi->mutex);
+ dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
+
+ /*
+ * Before checking the state, put back the context obtained with
+ * get_context() as it is no longer needed and sleep for a short
+ * period of time (see prolog notes).
+ */
+ put_context(ctxi);
ctxi = NULL;
ssleep(1);
rc = check_state(cfg);
@@ -1765,12 +1803,21 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
* inquiry (i.e. the Unit attention is due to the WWN changing).
*/
if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
+ /* Can't hold mutex across process_sense/read_cap16,
+ * since we could have an intervening EEH event.
+ */
+ ctxi->unavail = true;
+ mutex_unlock(&ctxi->mutex);
rc = process_sense(sdev, verify);
if (unlikely(rc)) {
dev_err(dev, "%s: Failed to validate sense data (%d)\n",
__func__, rc);
+ mutex_lock(&ctxi->mutex);
+ ctxi->unavail = false;
goto out;
}
+ mutex_lock(&ctxi->mutex);
+ ctxi->unavail = false;
}
switch (gli->mode) {
@@ -1955,6 +2002,14 @@ out:
* @cmd: IOCTL command.
* @arg: Userspace ioctl data structure.
*
+ * A read/write semaphore is used to implement a 'drain' of currently
+ * running ioctls. The read semaphore is taken at the beginning of each
+ * ioctl thread and released upon concluding execution. Additionally the
+ * semaphore should be released and then reacquired in any ioctl execution
+ * path which will wait for an event to occur that is outside the scope of
+ * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
+ * a thread simply needs to acquire the write semaphore.
+ *
* Return: 0 on success, -errno on failure
*/
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1989,6 +2044,9 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
};
+ /* Hold read semaphore so we can drain if needed */
+ down_read(&cfg->ioctl_rwsem);
+
/* Restrict command set to physical support only for internal LUN */
if (afu->internal_lun)
switch (cmd) {
@@ -2070,6 +2128,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
/* fall through to exit */
cxlflash_ioctl_exit:
+ up_read(&cfg->ioctl_rwsem);
if (unlikely(rc && known_ioctl))
dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
"returned rc %d\n", __func__,
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index d7dc88bc64a4..bede574bcd77 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -28,7 +28,10 @@ extern struct cxlflash_global global;
*/
#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
-#define MC_DISCOVERY_TIMEOUT 5 /* 5 secs */
+#define CMD_TIMEOUT 30 /* 30 secs */
+#define CMD_RETRIES 5 /* 5 retries for scsi_execute */
+
+#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */
#define CHAN2PORT(_x) ((_x) + 1)
#define PORT2CHAN(_x) ((_x) - 1)
@@ -60,7 +63,6 @@ struct llun_info {
u32 lun_index; /* Index in the LUN table */
u32 host_no; /* host_no from Scsi_host */
u32 port_sel; /* What port to use for this LUN */
- bool newly_created; /* Whether the LUN was just discovered */
bool in_table; /* Whether a LUN table entry was created */
u8 wwid[16]; /* Keep a duplicate copy here? */
@@ -84,17 +86,17 @@ enum ctx_ctrl {
CTX_CTRL_FILE = (1 << 5)
};
-#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0) << 28) | _id)
+#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
struct ctx_info {
- struct sisl_ctrl_map *ctrl_map; /* initialized at startup */
+ struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
alloc/free on attach/detach */
u32 rht_out; /* Number of checked out RHT entries */
u32 rht_perms; /* User-defined permissions for RHT entries */
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
- bool *rht_needs_ws; /* User-desired write-same function per RHTE */
+ u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
struct cxl_ioctl_start_work work;
u64 ctxid;
@@ -144,4 +146,6 @@ void cxlflash_ba_terminate(struct ba_lun *);
int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
+int check_state(struct cxlflash_cfg *);
+
#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 6155cb1d4ed3..a53f583e2d7b 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -132,7 +132,7 @@ static int ba_init(struct ba_lun *ba_lun)
return -ENOMEM;
}
- /* Pass the allocated lun info as a handle to the user */
+ /* Pass the allocated LUN info as a handle to the user */
ba_lun->ba_lun_handle = bali;
pr_debug("%s: Successfully initialized the LUN: "
@@ -165,7 +165,7 @@ static int find_free_range(u32 low,
num_bits = (sizeof(*lam) * BITS_PER_BYTE);
bit_pos = find_first_bit(lam, num_bits);
- pr_devel("%s: Found free bit %llX in lun "
+ pr_devel("%s: Found free bit %llX in LUN "
"map entry %llX at bitmap index = %X\n",
__func__, bit_pos, bali->lun_alloc_map[i],
i);
@@ -400,6 +400,24 @@ static int init_vlun(struct llun_info *lli)
* @lba: Logical block address to start write same.
* @nblks: Number of logical blocks to write same.
*
+ * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
+ * while in scsi_execute(), the EEH handler will attempt to recover. As part of
+ * the recovery, the handler drains all currently running ioctls, waiting until
+ * they have completed before proceeding with a reset. As this routine is used
+ * on the ioctl path, this can create a condition where the EEH handler becomes
+ * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
+ * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
+ * semaphore. This will allow the EEH handler to proceed with a recovery while
+ * this thread is still running. Once the scsi_execute() returns, reacquire the
+ * ioctl read semaphore and check the adapter state in case it changed while
+ * inside of scsi_execute(). The state check will wait if the adapter is still
+ * being recovered or return a failure if the recovery failed. In the event that
+ * the adapter reset failed, simply return the failure as the ioctl would be
+ * unable to continue.
+ *
+ * Note that the above puts a requirement on this routine to only be called on
+ * an ioctl thread.
+ *
* Return: 0 on success, -errno on failure
*/
static int write_same16(struct scsi_device *sdev,
@@ -414,7 +432,7 @@ static int write_same16(struct scsi_device *sdev,
int ws_limit = SISLITE_MAX_WS_BLOCKS;
u64 offset = lba;
int left = nblks;
- u32 tout = sdev->request_queue->rq_timeout;
+ u32 to = sdev->request_queue->rq_timeout;
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
struct device *dev = &cfg->dev->dev;
@@ -433,8 +451,20 @@ static int write_same16(struct scsi_device *sdev,
put_unaligned_be32(ws_limit < left ? ws_limit : left,
&scsi_cmd[10]);
+ /* Drop the ioctl read semaphore across lengthy call */
+ up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+ CMD_BUFSIZE, sense_buf, to, CMD_RETRIES,
+ 0, NULL);
+ down_read(&cfg->ioctl_rwsem);
+ rc = check_state(cfg);
+ if (rc) {
+ dev_err(dev, "%s: Failed state! result=0x%08X\n",
+ __func__, result);
+ rc = -ENODEV;
+ goto out;
+ }
+
if (result) {
dev_err_ratelimited(dev, "%s: command failed for "
"offset %lld result=0x%x\n",
@@ -681,14 +711,14 @@ out:
}
/**
- * _cxlflash_vlun_resize() - changes the size of a virtual lun
+ * _cxlflash_vlun_resize() - changes the size of a virtual LUN
* @sdev: SCSI device associated with LUN owning virtual LUN.
* @ctxi: Context owning resources.
* @resize: Resize ioctl data structure.
*
* On successful return, the user is informed of the new size (in blocks)
- * of the virtual lun in last LBA format. When the size of the virtual
- * lun is zero, the last LBA is reflected as -1. See comment in the
+ * of the virtual LUN in last LBA format. When the size of the virtual
+ * LUN is zero, the last LBA is reflected as -1. See comment in the
* prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
* on the error recovery list.
*
@@ -785,7 +815,7 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
u32 chan;
u32 lind;
struct afu *afu = cfg->afu;
- struct sisl_global_map *agm = &afu->afu_map->global;
+ struct sisl_global_map __iomem *agm = &afu->afu_map->global;
mutex_lock(&global.mutex);
@@ -830,7 +860,7 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
u32 lind;
int rc = 0;
struct afu *afu = cfg->afu;
- struct sisl_global_map *agm = &afu->afu_map->global;
+ struct sisl_global_map __iomem *agm = &afu->afu_map->global;
mutex_lock(&global.mutex);
@@ -885,8 +915,8 @@ out:
* @arg: UVirtual ioctl data structure.
*
* On successful return, the user is informed of the resource handle
- * to be used to identify the virtual lun and the size (in blocks) of
- * the virtual lun in last LBA format. When the size of the virtual lun
+ * to be used to identify the virtual LUN and the size (in blocks) of
+ * the virtual LUN in last LBA format. When the size of the virtual LUN
* is zero, the last LBA is reflected as -1.
*
* Return: 0 on success, -errno on failure
@@ -914,16 +944,9 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+ /* Setup the LUNs block allocator on first call */
mutex_lock(&gli->mutex);
if (gli->mode == MODE_NONE) {
- /* Setup the LUN table and block allocator on first call */
- rc = init_luntable(cfg, lli);
- if (rc) {
- dev_err(dev, "%s: call to init_luntable failed "
- "rc=%d!\n", __func__, rc);
- goto err0;
- }
-
rc = init_vlun(lli);
if (rc) {
dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
@@ -941,6 +964,13 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
}
mutex_unlock(&gli->mutex);
+ rc = init_luntable(cfg, lli);
+ if (rc) {
+ dev_err(dev, "%s: call to init_luntable failed rc=%d!\n",
+ __func__, rc);
+ goto err1;
+ }
+
ctxi = get_context(cfg, rctxid, lli, 0);
if (unlikely(!ctxi)) {
dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index bf0bbd42efb5..67669a9e73c1 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -939,6 +939,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
struct sk_buff *skb;
u16 len;
dma_addr_t pa;
+ int r;
len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
skb = dev_alloc_skb(len);
@@ -952,8 +953,19 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
skb_reset_network_header(skb);
skb_put(skb, len);
pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+
+ r = pci_dma_mapping_error(fnic->pdev, pa);
+ if (r) {
+ printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ goto free_skb;
+ }
+
fnic_queue_rq_desc(rq, skb, pa, len);
return 0;
+
+free_skb:
+ kfree_skb(skb);
+ return r;
}
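The change above follows the standard pattern for the legacy PCI DMA API: every pci_map_single() is checked with pci_dma_mapping_error() before the bus address is handed to hardware. A minimal sketch of that shape, separate from the fnic code paths (names are illustrative):

/* Illustrative only: map, verify, use, then unmap a single buffer. */
static int example_map_and_queue(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t pa;

	pa = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, pa))
		return -ENOMEM;	/* never hand a failed mapping to hardware */

	/* ... post a descriptor that references pa ... */

	pci_unmap_single(pdev, pa, len, PCI_DMA_TODEVICE);
	return 0;
}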
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
@@ -981,6 +993,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
unsigned long flags;
+ int r;
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1003,18 +1016,27 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
- spin_lock_irqsave(&fnic->wq_lock[0], flags);
- if (!vnic_wq_desc_avail(wq)) {
- pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- kfree_skb(skb);
- return;
+ r = pci_dma_mapping_error(fnic->pdev, pa);
+ if (r) {
+ printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ goto free_skb;
}
+ spin_lock_irqsave(&fnic->wq_lock[0], flags);
+ if (!vnic_wq_desc_avail(wq))
+ goto irq_restore;
+
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
0 /* hw inserts cos value */,
fnic->vlan_id, 1);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+ return;
+
+irq_restore:
+ spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+ pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+free_skb:
+ kfree_skb(skb);
}
/*
@@ -1071,6 +1093,12 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
+ ret = pci_dma_mapping_error(fnic->pdev, pa);
+ if (ret) {
+ printk(KERN_ERR "DMA map failed with error %d\n", ret);
+ goto free_skb_on_err;
+ }
+
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
(char *)eth_hdr, tot_len)) != 0) {
printk(KERN_ERR "fnic ctlr frame trace error!!!");
@@ -1082,15 +1110,17 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
pci_unmap_single(fnic->pdev, pa,
tot_len, PCI_DMA_TODEVICE);
ret = -1;
- goto fnic_send_frame_end;
+ goto irq_restore;
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
0 /* hw inserts cos value */,
fnic->vlan_id, 1, 1, 1);
-fnic_send_frame_end:
+
+irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+free_skb_on_err:
if (ret)
dev_kfree_skb_any(fp_skb(fp));
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 25436cd2860c..266b909fe854 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -330,6 +330,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
+ int r;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -346,6 +347,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
io_req->sgl_list,
sizeof(io_req->sgl_list[0]) * sg_count,
PCI_DMA_TODEVICE);
+
+ r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
+ if (r) {
+ printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
}
io_req->sense_buf_pa = pci_map_single(fnic->pdev,
@@ -353,6 +360,15 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
SCSI_SENSE_BUFFERSIZE,
PCI_DMA_FROMDEVICE);
+ r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
+ if (r) {
+ pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ sizeof(io_req->sgl_list[0]) * sg_count,
+ PCI_DMA_TODEVICE);
+ printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
int_to_scsilun(sc->device->lun, &fc_lun);
/* Enqueue the descriptor in the Copy WQ */
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a5a56fa31e70..ceee9a3fd9e5 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -495,15 +495,17 @@ struct unsol_rcv_ct_ctx {
#define LPFC_USER_LINK_SPEED_8G 8 /* 8 Gigabaud */
#define LPFC_USER_LINK_SPEED_10G 10 /* 10 Gigabaud */
#define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */
-#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_16G
-#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
+#define LPFC_USER_LINK_SPEED_32G 32 /* 32 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_32G
+#define LPFC_USER_LINK_SPEED_BITMAP ((1ULL << LPFC_USER_LINK_SPEED_32G) | \
+ (1 << LPFC_USER_LINK_SPEED_16G) | \
(1 << LPFC_USER_LINK_SPEED_10G) | \
(1 << LPFC_USER_LINK_SPEED_8G) | \
(1 << LPFC_USER_LINK_SPEED_4G) | \
(1 << LPFC_USER_LINK_SPEED_2G) | \
(1 << LPFC_USER_LINK_SPEED_1G) | \
(1 << LPFC_USER_LINK_SPEED_AUTO))
-#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32"
enum nemb_type {
nemb_mse = 1,
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d65bd178d131..f6446d759d7f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1642,8 +1642,6 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
- uint val = 0;\
- val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
phba->cfg_##attr);\
}
@@ -1808,8 +1806,6 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- uint val = 0;\
- val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
@@ -1835,8 +1831,6 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- uint val = 0;\
- val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
@@ -3282,15 +3276,20 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
if (val >= 0 && val <= 6) {
prev_val = phba->cfg_topology;
- phba->cfg_topology = val;
if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3113 Loop mode not supported at speed %d\n",
- phba->cfg_link_speed);
- phba->cfg_topology = prev_val;
+ val);
return -EINVAL;
}
+ if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ val == 4) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3114 Loop mode not supported\n");
+ return -EINVAL;
+ }
+ phba->cfg_topology = val;
if (nolip)
return strlen(buf);
@@ -3731,7 +3730,8 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
- ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
+ ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
+ ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb))) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2879 lpfc_link_speed attribute cannot be set "
"to %d. Speed is not supported by this port.\n",
@@ -5267,6 +5267,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LPFC_LINK_SPEED_16GHZ:
fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
break;
+ case LPFC_LINK_SPEED_32GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
+ break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b705068079c0..05dcc2abd541 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -904,7 +904,6 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
uint32_t evt_req_id = 0;
uint32_t cmd;
- uint32_t len;
struct lpfc_dmabuf *dmabuf = NULL;
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
@@ -946,7 +945,6 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
- len = ct_req->CommandResponse.bits.Size;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
@@ -2988,7 +2986,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct diag_mode_test *diag_mode;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
struct lpfc_sli *psli = &phba->sli;
@@ -3031,8 +3028,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = -EINVAL;
goto loopback_test_exit;
}
- diag_mode = (struct diag_mode_test *)
- job->request->rqst_data.h_vendor.vendor_cmd;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -3293,7 +3288,6 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct get_mgmt_rev *event_req;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
@@ -3306,9 +3300,6 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
goto job_error;
}
- event_req = (struct get_mgmt_rev *)
- job->request->rqst_data.h_vendor.vendor_cmd;
-
event_reply = (struct get_mgmt_rev_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
@@ -4348,7 +4339,6 @@ static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
- struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
@@ -4362,9 +4352,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
- phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
-
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
rc = -ENOMEM;
@@ -4606,7 +4593,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t transmit_length, receive_length, mode;
struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
- struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
@@ -4804,8 +4790,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* rebuild the command for sli4 using our
* own buffers like we do for biu diags
*/
- header = (struct mbox_header *)
- &pmb->un.varWords[0];
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&pmb->un.varWords[0];
receive_length = nembed_sge->sge[0].length;
@@ -5048,7 +5032,6 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
IOCB_t *cmd;
int rc = 0;
struct menlo_command *menlo_cmd;
- struct menlo_response *menlo_resp;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
int request_nseg;
int reply_nseg;
@@ -5088,9 +5071,6 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
menlo_cmd = (struct menlo_command *)
job->request->rqst_data.h_vendor.vendor_cmd;
- menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
-
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index af129966bd11..8fded1f7605f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -55,6 +55,7 @@
#define HBA_PORTSPEED_10GBIT 0x0004 /* 10 GBit/sec */
#define HBA_PORTSPEED_8GBIT 0x0010 /* 8 GBit/sec */
#define HBA_PORTSPEED_16GBIT 0x0020 /* 16 GBit/sec */
+#define HBA_PORTSPEED_32GBIT 0x0040 /* 32 GBit/sec */
#define HBA_PORTSPEED_UNKNOWN 0x0800 /* Unknown */
#define FOURBYTES 4
@@ -575,7 +576,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_dmabuf *bmp;
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
@@ -588,7 +588,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context_un.rsp_iocb = rspiocb;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
@@ -1733,12 +1732,9 @@ hba_out:
case SLI_MGMT_RPRT:
case SLI_MGMT_RPA:
{
- lpfc_vpd_t *vp;
struct serv_parm *hsp;
int len = 0;
- vp = &phba->vpd;
-
if (cmdcode == SLI_MGMT_RPRT) {
rh = (struct lpfc_fdmi_reg_hba *)
&CtReq->un.PortID;
@@ -1778,6 +1774,8 @@ hba_out:
ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
ae->un.SupportSpeed = 0;
+ if (phba->lmt & LMT_32Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_32GBIT;
if (phba->lmt & LMT_16Gb)
ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
if (phba->lmt & LMT_10Gb)
@@ -1821,6 +1819,9 @@ hba_out:
case LPFC_LINK_SPEED_16GHZ:
ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
break;
+ case LPFC_LINK_SPEED_32GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_32GBIT;
+ break;
default:
ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
break;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 36bf58ba750a..3feeb447b740 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -457,11 +457,9 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mboxq;
struct lpfc_nodelist *ndlp;
- struct serv_parm *sp;
struct lpfc_dmabuf *dmabuf;
int rc = 0;
- sp = &phba->fc_fabparam;
/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(phba->link_flag & LS_LOOPBACK_MODE) &&
@@ -1028,9 +1026,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
+ "Data x%x x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
+ irsp->ulpTimeout, phba->hba_flag,
+ phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
@@ -1154,6 +1154,9 @@ stop_rr_fcf_flogi:
}
flogifail:
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ spin_unlock_irq(&phba->hbalock);
lpfc_nlp_put(ndlp);
if (!lpfc_error_lost_link(irsp)) {
@@ -1205,14 +1208,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
uint8_t *pcmd;
uint16_t cmdsize;
uint32_t tmo;
int rc;
- pring = &phba->sli.ring[LPFC_ELS_RING];
-
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_FLOGI);
@@ -1454,8 +1454,6 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
- int sentplogi;
-
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
@@ -1468,7 +1466,7 @@ lpfc_more_plogi(struct lpfc_vport *vport)
/* Check to see if there are more PLOGIs to be sent */
if (vport->fc_flag & FC_NLP_MORE)
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
- sentplogi = lpfc_els_disc_plogi(vport);
+ lpfc_els_disc_plogi(vport);
return;
}
@@ -1956,16 +1954,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
- IOCB_t *icmd;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int ret;
- psli = &phba->sli;
-
ndlp = lpfc_findnode_did(vport, did);
if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
ndlp = NULL;
@@ -1977,7 +1971,6 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PLOGI request, remainder of payload is service parameters */
@@ -2034,10 +2027,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
- psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2117,7 +2108,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -2128,7 +2118,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PRLI request, remainder of payload is service parameters */
@@ -2413,7 +2402,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -2424,7 +2412,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For ADISC request, remainder of payload is service parameters */
@@ -2478,12 +2465,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = ndlp->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_sli *psli;
struct lpfcMboxq *mbox;
unsigned long flags;
uint32_t skip_recovery = 0;
- psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2609,7 +2594,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -2628,7 +2612,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -2742,14 +2725,11 @@ int
lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
struct lpfc_nodelist *ndlp;
- psli = &phba->sli;
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
ndlp = lpfc_findnode_did(vport, nportid);
@@ -2776,7 +2756,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
return 1;
}
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
@@ -2836,9 +2815,7 @@ static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
FARP *fp;
uint8_t *pcmd;
uint32_t *lp;
@@ -2846,7 +2823,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_nodelist *ondlp;
struct lpfc_nodelist *ndlp;
- psli = &phba->sli;
cmdsize = (sizeof(uint32_t) + sizeof(FARP));
ndlp = lpfc_findnode_did(vport, nportid);
@@ -2872,7 +2848,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
return 1;
}
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
@@ -3922,13 +3897,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
- psli = &phba->sli;
oldcmd = &oldiocb->iocb;
switch (flag) {
@@ -4061,12 +4034,10 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -4212,13 +4183,10 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
-
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
@@ -4315,12 +4283,10 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
RNID *rn;
IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
if (format)
@@ -4447,12 +4413,10 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
/* The accumulated length can exceed the BPL_SIZE. For
@@ -4746,6 +4710,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+ if (phba->lmt & LMT_32Gb)
+ rdp_cap |= RDP_PS_32GB;
if (phba->lmt & LMT_16Gb)
rdp_cap |= RDP_PS_16GB;
if (phba->lmt & LMT_10Gb)
@@ -5181,14 +5147,12 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
- IOCB_t *icmd;
uint8_t *lp;
struct fc_lcb_request_frame *beacon;
struct lpfc_lcb_context *lcb_context;
uint8_t state, rjt_err;
struct ls_rjt stat;
- icmd = &cmdiocb->iocb;
pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
lp = (uint8_t *)pcmd->virt;
beacon = (struct fc_lcb_request_frame *)pcmd->virt;
@@ -5444,7 +5408,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
fc_host_post_vendor_event(shost,
fc_get_event_number(),
- sizeof(struct lpfc_els_event_header) + payload_len,
+ sizeof(struct lpfc_rscn_event_header) + payload_len,
(char *)rscn_event_data,
LPFC_NL_VENDOR_ID);
@@ -5481,13 +5445,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp, *datap;
- IOCB_t *icmd;
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
int i;
- icmd = &cmdiocb->iocb;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -5893,6 +5855,13 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
+ /* send our FLOGI first */
+ if (vport->port_state < LPFC_FLOGI) {
+ vport->fc_myDID = 0;
+ lpfc_initial_flogi(vport);
+ vport->fc_myDID = Fabric_DID;
+ }
+
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
@@ -5943,12 +5912,10 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
RNID *rn;
struct ls_rjt stat;
uint32_t cmd;
- icmd = &cmdiocb->iocb;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6259,7 +6226,6 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *pcmd;
struct ls_rjt stat;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
@@ -6267,8 +6233,6 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* reject the unsolicited RPS request and done with it */
goto reject_out;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
@@ -6482,7 +6446,6 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct RRQ *els_rrq;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -6501,7 +6464,6 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For RRQ request, remainder of payload is Exchange IDs */
@@ -7374,6 +7336,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"Data: x%x x%x x%x x%x\n",
cmd, did, vport->port_state, vport->fc_flag,
vport->fc_myDID, vport->fc_prevDID);
+
+ /* reject till our FLOGI completes */
+ if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
+ (cmd != ELS_CMD_FLOGI)) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ goto lsrjt;
+ }
+
switch (cmd) {
case ELS_CMD_PLOGI:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -7411,20 +7382,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
- /* We get here, and drop thru, if we are PT2PT with
- * another NPort and the other side has initiated
- * the PLOGI before responding to our FLOGI.
- */
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- (phba->fc_topology_changed ||
- vport->fc_myDID != vport->fc_prevDID)) {
- lpfc_unregister_fcf_prep(phba);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
- phba->fc_topology_changed = 0;
- lpfc_issue_reg_vfi(vport);
- }
}
spin_lock_irq(shost->host_lock);
@@ -7655,6 +7612,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
break;
}
+lsrjt:
/* check if need to LS_RJT received ELS cmd */
if (rjt_err) {
memset(&stat, 0, sizeof(stat));
@@ -8428,7 +8386,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -8439,7 +8396,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 759cbebed7c7..bfc2442dd74a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -800,7 +800,6 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
- int rc;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
@@ -816,10 +815,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC))
continue;
- rc = lpfc_disc_state_machine(vport, ndlp, NULL,
- remove
- ? NLP_EVT_DEVICE_RM
- : NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ remove
+ ? NLP_EVT_DEVICE_RM
+ : NLP_EVT_DEVICE_RECOVERY);
}
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -1774,7 +1773,6 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
uint16_t *next_fcf_index)
{
void *virt_addr;
- dma_addr_t phys_addr;
struct lpfc_mbx_sge sge;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
uint32_t shdr_status, shdr_add_status, if_type;
@@ -1785,7 +1783,6 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
if (unlikely(!mboxq->sge_array)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2524 Failed to get the non-embedded SGE "
@@ -2977,7 +2974,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
struct lpfc_vport *vport = pmb->vport;
-
+ struct serv_parm *sp = &vport->fc_sparam;
+ uint32_t ed_tov;
/* Check for error */
if (mb->mbxStatus) {
@@ -2992,6 +2990,18 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
+
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
+ ed_tov = (ed_tov + 999999) / 1000000;
+
+ phba->fc_edtov = ed_tov;
+ phba->fc_ratov = (2 * ed_tov) / 1000;
+ if (phba->fc_ratov < FF_DEF_RATOV) {
+ /* RA_TOV should be at least 10 sec for initial FLOGI */
+ phba->fc_ratov = FF_DEF_RATOV;
+ }
+
lpfc_update_vport_wwn(vport);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
@@ -3032,6 +3042,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
case LPFC_LINK_SPEED_8GHZ:
case LPFC_LINK_SPEED_10GHZ:
case LPFC_LINK_SPEED_16GHZ:
+ case LPFC_LINK_SPEED_32GHZ:
phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
break;
default:
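
The lpfc_mbx_cmpl_read_sparam() hunk above derives the timeouts from the fabric service parameters: E_D_TOV is rounded up from nanosecond ticks to milliseconds when edtovResolution is set, and R_A_TOV is held to a 10 second floor. Below is a minimal standalone sketch of that arithmetic, assuming FF_DEF_RATOV is 10 as set in the lpfc_hw.h change further down; the helper name is local to this example.

#include <stdint.h>
#include <stdio.h>

#define DEF_RATOV 10	/* seconds, mirrors FF_DEF_RATOV below */

static void derive_tov(uint32_t ed_tov, int ns_resolution,
		       uint32_t *edtov_ms, uint32_t *ratov_s)
{
	if (ns_resolution)			/* E_D_TOV ticks are nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;	/* round up to ms */

	*edtov_ms = ed_tov;
	*ratov_s = (2 * ed_tov) / 1000;		/* R_A_TOV = 2 * E_D_TOV */
	if (*ratov_s < DEF_RATOV)		/* at least 10 s for the initial FLOGI */
		*ratov_s = DEF_RATOV;
}

int main(void)
{
	uint32_t edtov_ms, ratov_s;

	derive_tov(2000000000u, 1, &edtov_ms, &ratov_s);	/* 2,000,000,000 ns */
	printf("E_D_TOV %u ms, R_A_TOV %u s\n", edtov_ms, ratov_s);	/* 2000 ms, 10 s */
	return 0;
}
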
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 892c5257d87c..2cce88e967ce 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -33,7 +33,7 @@
#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */
#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */
-#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */
+#define FF_DEF_RATOV 10 /* Default RA_TOV (10s) */
#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */
#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING
@@ -1400,6 +1400,7 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
+#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -2075,6 +2076,7 @@ typedef struct {
#define LINK_SPEED_8G 0x8 /* 8 Gigabaud */
#define LINK_SPEED_10G 0x10 /* 10 Gigabaud */
#define LINK_SPEED_16G 0x11 /* 16 Gigabaud */
+#define LINK_SPEED_32G 0x14 /* 32 Gigabaud */
} INIT_LINK_VAR;
@@ -2246,6 +2248,7 @@ typedef struct {
#define LMT_8Gb 0x080
#define LMT_10Gb 0x100
#define LMT_16Gb 0x200
+#define LMT_32Gb 0x400
uint32_t rsvd2;
uint32_t rsvd3;
uint32_t max_xri;
@@ -2727,6 +2730,7 @@ struct lpfc_mbx_read_top {
#define LPFC_LINK_SPEED_8GHZ 0x20
#define LPFC_LINK_SPEED_10GHZ 0x40
#define LPFC_LINK_SPEED_16GHZ 0x80
+#define LPFC_LINK_SPEED_32GHZ 0x90
};
/* Structure for MB Command CLEAR_LA (22) */
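
The 32G additions above extend several if-chains that translate the adapter's LMT_* capability bits into supported-speed flags (see the lpfc_ct.c and lpfc_init.c hunks). Purely as an illustration, not the driver's code, the same translation can be written table-driven; the LMT_* values mirror the defines above, while the speed-flag values are arbitrary for this example.

#include <stdint.h>
#include <stdio.h>

struct speed_map {
	uint32_t lmt_bit;	/* capability bit reported by the HBA */
	uint32_t speed_flag;	/* corresponding supported-speed flag */
};

static const struct speed_map speed_maps[] = {
	{ 0x080 /* LMT_8Gb  */, 1u << 3 },
	{ 0x100 /* LMT_10Gb */, 1u << 4 },
	{ 0x200 /* LMT_16Gb */, 1u << 5 },
	{ 0x400 /* LMT_32Gb */, 1u << 6 },
};

static uint32_t lmt_to_speeds(uint32_t lmt)
{
	uint32_t speeds = 0;
	unsigned int i;

	for (i = 0; i < sizeof(speed_maps) / sizeof(speed_maps[0]); i++)
		if (lmt & speed_maps[i].lmt_bit)
			speeds |= speed_maps[i].speed_flag;
	return speeds;
}

int main(void)
{
	/* an adapter reporting 16G and 32G capability */
	printf("speeds = 0x%x\n", lmt_to_speeds(0x200 | 0x400));	/* 0x60 */
	return 0;
}
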
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f962118da8ed..db9446c612da 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -699,7 +699,9 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
!(phba->lmt & LMT_10Gb)) ||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
- !(phba->lmt & LMT_16Gb))) {
+ !(phba->lmt & LMT_16Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
+ !(phba->lmt & LMT_32Gb))) {
/* Reset link speed to auto */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1302 Invalid speed for this board:%d "
@@ -2035,7 +2037,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_16Gb)
+ if (phba->lmt & LMT_32Gb)
+ max_speed = 32;
+ else if (phba->lmt & LMT_16Gb)
max_speed = 16;
else if (phba->lmt & LMT_10Gb)
max_speed = 10;
@@ -2229,6 +2233,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
m = (typeof(m)){"OCe15100", "PCIe",
"Obsolete, Unsupported FCoE"};
break;
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
+ break;
case PCI_DEVICE_ID_SKYHAWK:
case PCI_DEVICE_ID_SKYHAWK_VF:
oneConnect = 1;
@@ -2253,7 +2260,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
phba->Port);
else if (max_speed == 0)
snprintf(descp, 255,
- "Emulex %s %s %s ",
+ "Emulex %s %s %s",
m.name, m.bus, m.function);
else
snprintf(descp, 255,
@@ -3491,6 +3498,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
+ if (phba->lmt & LMT_32Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
if (phba->lmt & LMT_16Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
if (phba->lmt & LMT_10Gb)
@@ -3854,6 +3863,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_FC_LA_SPEED_16G:
port_speed = 16000;
break;
+ case LPFC_FC_LA_SPEED_32G:
+ port_speed = 32000;
+ break;
default:
port_speed = 0;
}
@@ -4982,8 +4994,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
if (!phba->sli.ring)
- phba->sli.ring = (struct lpfc_sli_ring *)
- kzalloc(LPFC_SLI3_MAX_RING *
+ phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
sizeof(struct lpfc_sli_ring), GFP_KERNEL);
if (!phba->sli.ring)
return -ENOMEM;
@@ -4995,7 +5006,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates with the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
if (phba->cfg_enable_bg) {
@@ -8679,7 +8690,6 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- struct cpumask *mask;
uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
/* If there is no mapping, just return */
@@ -8773,11 +8783,8 @@ found:
first_cpu = cpu;
/* Now affinitize to the selected CPU */
- mask = &cpup->maskbits;
- cpumask_clear(mask);
- cpumask_set_cpu(cpu, mask);
i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
- vector, mask);
+ vector, get_cpu_mask(cpu));
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3330 Set Affinity: CPU %d channel %d "
@@ -10287,7 +10294,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
struct lpfc_hba *phba;
struct lpfc_vport *vport = NULL;
struct Scsi_Host *shost = NULL;
- int error, ret;
+ int error;
uint32_t cfg_mode, intr_mode;
int adjusted_fcp_io_channel;
@@ -10411,7 +10418,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* check for firmware upgrade or downgrade */
if (phba->cfg_request_firmware_upgrade)
- ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
+ lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -11354,6 +11361,8 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
@@ -11477,6 +11486,7 @@ lpfc_exit(void)
free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
}
kfree(lpfc_used_cpu);
+ idr_destroy(&lpfc_hba_index);
}
module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 4abb93a83e0f..f87f90e9b7df 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -289,9 +289,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
struct lpfc_dmabuf *mp)
{
MAILBOX_t *mb;
- struct lpfc_sli *psli;
- psli = &phba->sli;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -483,13 +481,11 @@ lpfc_init_link(struct lpfc_hba * phba,
LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
{
lpfc_vpd_t *vpd;
- struct lpfc_sli *psli;
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- psli = &phba->sli;
switch (topology) {
case FLAGS_TOPOLOGY_MODE_LOOP_PT:
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
@@ -510,6 +506,13 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
+ if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
+ /* Failover is not tried for Lancer G6 */
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+ }
+
/* Enable asynchronous ABTS responses from firmware */
mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
@@ -543,6 +546,10 @@ lpfc_init_link(struct lpfc_hba * phba,
mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
break;
+ case LPFC_USER_LINK_SPEED_32G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
+ break;
case LPFC_USER_LINK_SPEED_AUTO:
default:
mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
@@ -585,9 +592,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
- struct lpfc_sli *psli;
- psli = &phba->sli;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -2010,7 +2015,6 @@ lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
uint16_t fcf_index)
{
void *virt_addr;
- dma_addr_t phys_addr;
uint8_t *bytep;
struct lpfc_mbx_sge sge;
uint32_t alloc_len, req_len;
@@ -2039,7 +2043,6 @@ lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
virt_addr = mboxq->sge_array->addr[0];
read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index af3b38aba65e..ed9a2c80c4aa 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -820,7 +820,6 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
- MAILBOX_t *mb;
uint16_t rpi;
phba = vport->phba;
@@ -828,7 +827,6 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!(phba->pport->load_flag & FC_UNLOADING) &&
(evt == NLP_EVT_CMPL_REG_LOGIN) &&
(!pmb->u.mb.mbxStatus)) {
- mb = &pmb->u.mb;
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, rpi);
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index e5eb40d2c512..051b3b3bd625 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1293,7 +1293,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
struct scatterlist *sgpe; /* s/g prot entry */
- struct scatterlist *sgde; /* s/g data entry */
struct lpfc_scsi_buf *lpfc_cmd = NULL;
struct scsi_dif_tuple *src = NULL;
struct lpfc_nodelist *ndlp;
@@ -1309,7 +1308,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
sgpe = scsi_prot_sglist(sc);
- sgde = scsi_sglist(sc);
lba = scsi_get_lba(sc);
/* First check if we need to match the LBA */
@@ -1882,7 +1880,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
#endif
uint32_t checking = 1;
uint32_t reftag;
- unsigned blksize;
uint8_t txop, rxop;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1890,7 +1887,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- blksize = lpfc_cmd_blksize(sc);
reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -2263,7 +2259,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_addr_t physaddr;
int i = 0, num_sge = 0, status;
uint32_t reftag;
- unsigned blksize;
uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
@@ -2277,7 +2272,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- blksize = lpfc_cmd_blksize(sc);
reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -2881,7 +2875,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
struct scsi_dif_tuple *src = NULL;
uint8_t *data_src = NULL;
- uint16_t guard_tag, guard_type;
+ uint16_t guard_tag;
uint16_t start_app_tag, app_tag;
uint32_t start_ref_tag, ref_tag;
int prot, protsegcnt;
@@ -2922,7 +2916,6 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
data_len = sgde->length;
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
- guard_type = scsi_host_get_guard(cmd->device->host);
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
@@ -3908,12 +3901,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd;
- int result;
int depth;
unsigned long flags;
struct lpfc_fast_path_event *fast_path_evt;
struct Scsi_Host *shost;
- uint32_t queue_depth, scsi_id;
uint32_t logit = LOG_FCP;
/* Sanity check on return of outstanding command */
@@ -4095,7 +4086,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
lpfc_update_stats(phba, lpfc_cmd);
- result = cmd->result;
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4132,8 +4122,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
- queue_depth = cmd->device->queue_depth;
- scsi_id = cmd->device->id;
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4feb9312a447..f9585cdd8933 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6696,7 +6696,7 @@ lpfc_mbox_timeout(unsigned long ptr)
* This function checks if any mailbox completions are present on the mailbox
* completion queue.
**/
-bool
+static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
@@ -12491,12 +12491,10 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- uint32_t eqidx;
/* Get the driver's phba structure from the dev_id */
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
phba = fcp_eq_hdl->phba;
- eqidx = fcp_eq_hdl->idx;
if (unlikely(!phba))
return IRQ_NONE;
@@ -12831,12 +12829,8 @@ out_fail:
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
- struct pci_dev *pdev;
-
if (!phba->pcidev)
return NULL;
- else
- pdev = phba->pcidev;
switch (pci_barset) {
case WQ_PCI_BAR_0_AND_1:
@@ -15920,7 +15914,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
LPFC_MBOXQ_t *mboxq;
uint8_t *bytep;
void *virt_addr;
- dma_addr_t phys_addr;
struct lpfc_mbx_sge sge;
uint32_t alloc_len, req_len;
uint32_t fcfindex;
@@ -15953,7 +15946,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
virt_addr = mboxq->sge_array->addr[0];
/*
* Configure the FCF record for FCFI 0. This is the driver's
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d1a5b057c6f3..1e916e16ce98 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -454,7 +454,6 @@ struct lpfc_vector_map_info {
uint16_t core_id;
uint16_t irq;
uint16_t channel_id;
- struct cpumask maskbits;
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6258d3d7722a..ea53aa664759 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "10.7.0.0."
+#define LPFC_DRIVER_VERSION "11.0.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 9f77d23239a2..2c1160c7ec92 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -390,25 +390,57 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
- * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * scsi_dev_info_list_find - find a matching dev_info list entry.
* @vendor: vendor string
* @model: model (product) string
* @key: specify list to use
*
* Description:
- * Remove and destroy one dev_info entry for @vendor, @model
+ * Finds the first dev_info entry matching @vendor, @model
* in list specified by @key.
*
- * Returns: 0 OK, -error on failure.
+ * Returns: pointer to matching entry, or ERR_PTR on failure.
**/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
+ const char *model, int key)
{
- struct scsi_dev_info_list *devinfo, *found = NULL;
+ struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
+ size_t vmax, mmax;
+ const char *vskip, *mskip;
if (IS_ERR(devinfo_table))
- return PTR_ERR(devinfo_table);
+ return (struct scsi_dev_info_list *) devinfo_table;
+
+ /* Prepare for "compatible" matches */
+
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ vmax = 8; /* max length of vendor */
+ vskip = vendor;
+ while (vmax > 0 && *vskip == ' ') {
+ vmax--;
+ vskip++;
+ }
+ /* Also skip trailing spaces */
+ while (vmax > 0 && vskip[vmax - 1] == ' ')
+ --vmax;
+
+ mmax = 16; /* max length of model */
+ mskip = model;
+ while (mmax > 0 && *mskip == ' ') {
+ mmax--;
+ mskip++;
+ }
+ while (mmax > 0 && mskip[mmax - 1] == ' ')
+ --mmax;
list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
dev_info_list) {
@@ -416,61 +448,48 @@ int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
/*
* Behave like the older version of get_device_flags.
*/
- size_t max;
- /*
- * XXX why skip leading spaces? If an odd INQUIRY
- * value, that should have been part of the
- * scsi_static_device_list[] entry, such as " FOO"
- * rather than "FOO". Since this code is already
- * here, and we don't know what device it is
- * trying to work with, leave it as-is.
- */
- max = 8; /* max length of vendor */
- while ((max > 0) && *vendor == ' ') {
- max--;
- vendor++;
- }
- /*
- * XXX removing the following strlen() would be
- * good, using it means that for a an entry not in
- * the list, we scan every byte of every vendor
- * listed in scsi_static_device_list[], and never match
- * a single one (and still have to compare at
- * least the first byte of each vendor).
- */
- if (memcmp(devinfo->vendor, vendor,
- min(max, strlen(devinfo->vendor))))
+ if (memcmp(devinfo->vendor, vskip, vmax) ||
+ devinfo->vendor[vmax])
continue;
- /*
- * Skip spaces again.
- */
- max = 16; /* max length of model */
- while ((max > 0) && *model == ' ') {
- max--;
- model++;
- }
- if (memcmp(devinfo->model, model,
- min(max, strlen(devinfo->model))))
+ if (memcmp(devinfo->model, mskip, mmax) ||
+ devinfo->model[mmax])
continue;
- found = devinfo;
+ return devinfo;
} else {
if (!memcmp(devinfo->vendor, vendor,
sizeof(devinfo->vendor)) &&
!memcmp(devinfo->model, model,
sizeof(devinfo->model)))
- found = devinfo;
+ return devinfo;
}
- if (found)
- break;
}
- if (found) {
- list_del(&found->dev_info_list);
- kfree(found);
- return 0;
- }
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @key: specify list to use
+ *
+ * Description:
+ * Remove and destroy one dev_info entry for @vendor, @model
+ * in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+ struct scsi_dev_info_list *found;
- return -ENOENT;
+ found = scsi_dev_info_list_find(vendor, model, key);
+ if (IS_ERR(found))
+ return PTR_ERR(found);
+
+ list_del(&found->dev_info_list);
+ kfree(found);
+ return 0;
}
EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
@@ -565,64 +584,16 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev,
int key)
{
struct scsi_dev_info_list *devinfo;
- struct scsi_dev_info_list_table *devinfo_table;
+ int err;
- devinfo_table = scsi_devinfo_lookup_by_key(key);
+ devinfo = scsi_dev_info_list_find(vendor, model, key);
+ if (!IS_ERR(devinfo))
+ return devinfo->flags;
- if (IS_ERR(devinfo_table))
- return PTR_ERR(devinfo_table);
+ err = PTR_ERR(devinfo);
+ if (err != -ENOENT)
+ return err;
- list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
- dev_info_list) {
- if (devinfo->compatible) {
- /*
- * Behave like the older version of get_device_flags.
- */
- size_t max;
- /*
- * XXX why skip leading spaces? If an odd INQUIRY
- * value, that should have been part of the
- * scsi_static_device_list[] entry, such as " FOO"
- * rather than "FOO". Since this code is already
- * here, and we don't know what device it is
- * trying to work with, leave it as-is.
- */
- max = 8; /* max length of vendor */
- while ((max > 0) && *vendor == ' ') {
- max--;
- vendor++;
- }
- /*
- * XXX removing the following strlen() would be
- * good, using it means that for a an entry not in
- * the list, we scan every byte of every vendor
- * listed in scsi_static_device_list[], and never match
- * a single one (and still have to compare at
- * least the first byte of each vendor).
- */
- if (memcmp(devinfo->vendor, vendor,
- min(max, strlen(devinfo->vendor))))
- continue;
- /*
- * Skip spaces again.
- */
- max = 16; /* max length of model */
- while ((max > 0) && *model == ' ') {
- max--;
- model++;
- }
- if (memcmp(devinfo->model, model,
- min(max, strlen(devinfo->model))))
- continue;
- return devinfo->flags;
- } else {
- if (!memcmp(devinfo->vendor, vendor,
- sizeof(devinfo->vendor)) &&
- !memcmp(devinfo->model, model,
- sizeof(devinfo->model)))
- return devinfo->flags;
- }
- }
/* nothing found, return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
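
The scsi_dev_info_list_find() helper factored out above preserves the old "compatible" matching behaviour: trim leading and trailing spaces within the fixed-width INQUIRY fields (8-byte vendor, 16-byte model) and require the remainder to match the list entry exactly. A standalone sketch of that comparison follows, assuming NUL-terminated list entries as in the devinfo tables; the helper names are local to this example.

#include <stdio.h>
#include <string.h>

/* Narrow a fixed-width INQUIRY field past leading and trailing spaces
 * by adjusting the caller's start pointer and length.
 */
static void trim_field(const char **p, size_t *len)
{
	while (*len > 0 && **p == ' ') {
		(*p)++;
		(*len)--;
	}
	while (*len > 0 && (*p)[*len - 1] == ' ')
		(*len)--;
}

/* Return 1 if the trimmed query matches the stored entry exactly. */
static int field_match(const char *entry, const char *query, size_t max)
{
	trim_field(&query, &max);
	return strlen(entry) == max && memcmp(entry, query, max) == 0;
}

int main(void)
{
	/* " FOO    " in an 8-byte vendor field matches entry "FOO" ... */
	printf("%d\n", field_match("FOO", " FOO    ", 8));	/* 1 */
	/* ... but "FOOBAR  " does not */
	printf("%d\n", field_match("FOO", "FOOBAR  ", 8));	/* 0 */
	return 0;
}
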
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4887f317ea58..8b9c2a38d1cc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -88,6 +88,17 @@ config SPI_BCM2835
is for the regular SPI controller. Slave mode operation is also
not supported.
+config SPI_BCM2835AUX
+ tristate "BCM2835 SPI auxiliary controller"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on GPIOLIB
+ help
+ This selects a driver for the Broadcom BCM2835 SPI aux master.
+
+ The BCM2835 contains two types of SPI master controller: the
+ "universal SPI master" and the regular SPI controller.
+ This driver is for the universal/auxiliary SPI controller.
+
config SPI_BFIN5XX
tristate "SPI controller driver for ADI Blackfin5xx"
depends on BLACKFIN && !BF60x
@@ -125,7 +136,7 @@ config SPI_BCM53XX
config SPI_BCM63XX
tristate "Broadcom BCM63xx SPI controller"
- depends on BCM63XX
+ depends on BCM63XX || COMPILE_TEST
help
Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
@@ -304,7 +315,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
- depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6a7f6f9d0d1c..31fb7fb2a0b6 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
+obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index bf1f9b32c597..6165bf21d427 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -240,14 +240,9 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->bitbang.flags = SPI_CS_HIGH;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- ret = -ENOENT;
- goto err_put_master;
- }
-
- sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (!sp->base) {
- ret = -ENXIO;
+ sp->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(sp->base)) {
+ ret = PTR_ERR(sp->base);
goto err_put_master;
}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 63318e2afba1..aebad36391c9 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -773,7 +773,8 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
*plen = len;
- if (atmel_spi_dma_slave_config(as, &slave_config, 8))
+ if (atmel_spi_dma_slave_config(as, &slave_config,
+ xfer->bits_per_word))
goto err_exit;
/* Send both scatterlists */
@@ -871,14 +872,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
* Calculate the lowest divider that satisfies the
* constraint, assuming div32/fdiv/mbz == 0.
*/
- if (xfer->speed_hz)
- scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
- else
- /*
- * This can happend if max_speed is null.
- * In this case, we set the lowest possible speed
- */
- scbr = 0xff;
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
/*
* If the resulting divider doesn't fit into the
@@ -1300,14 +1294,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
return -EINVAL;
}
- if (xfer->bits_per_word) {
- asd = spi->controller_state;
- bits = (asd->csr >> 4) & 0xf;
- if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev,
+ asd = spi->controller_state;
+ bits = (asd->csr >> 4) & 0xf;
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
"you can't yet change bits_per_word in transfers\n");
- return -ENOPROTOOPT;
- }
+ return -ENOPROTOOPT;
}
/*
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index f45e085c01a6..afd239d6dec1 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -233,13 +233,12 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
unsigned bpw, hz;
u32 cfg, stat;
- bpw = spi->bits_per_word;
- hz = spi->max_speed_hz;
if (t) {
- if (t->bits_per_word)
- bpw = t->bits_per_word;
- if (t->speed_hz)
- hz = t->speed_hz;
+ bpw = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bpw = spi->bits_per_word;
+ hz = spi->max_speed_hz;
}
if (!hz)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 3e8eeb23d4e9..cf04960cc3e6 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -777,7 +777,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
goto out_master_put;
}
- bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
@@ -786,6 +786,12 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
+ bcm2835_dma_init(master, &pdev->dev);
+
+ /* initialise the hardware with the default polarities */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
dev_name(&pdev->dev), master);
if (err) {
@@ -793,12 +799,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- bcm2835_dma_init(master, &pdev->dev);
-
- /* initialise the hardware with the default polarities */
- bcm2835_wr(bs, BCM2835_SPI_CS,
- BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
-
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
new file mode 100644
index 000000000000..7de6f8472a81
--- /dev/null
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -0,0 +1,512 @@
+/*
+ * Driver for Broadcom BCM2835 auxiliary SPI Controllers
+ *
+ * the driver does not rely on the native chipselects at all
+ * but only uses the gpio type chipselects
+ *
+ * Based on: spi-bcm2835.c
+ *
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+/*
+ * spi register defines
+ *
+ * note there is garbage in the "official" documentation,
+ * so some data is taken from the file:
+ * brcm_usrlib/dag/vmcsx/vcinclude/bcm2708_chip/aux_io.h
+ * inside of:
+ * http://www.broadcom.com/docs/support/videocore/Brcm_Android_ICS_Graphics_Stack.tar.gz
+ */
+
+/* SPI register offsets */
+#define BCM2835_AUX_SPI_CNTL0 0x00
+#define BCM2835_AUX_SPI_CNTL1 0x04
+#define BCM2835_AUX_SPI_STAT 0x08
+#define BCM2835_AUX_SPI_PEEK 0x0C
+#define BCM2835_AUX_SPI_IO 0x20
+#define BCM2835_AUX_SPI_TXHOLD 0x30
+
+/* Bitfields in CNTL0 */
+#define BCM2835_AUX_SPI_CNTL0_SPEED 0xFFF00000
+#define BCM2835_AUX_SPI_CNTL0_SPEED_MAX 0xFFF
+#define BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT 20
+#define BCM2835_AUX_SPI_CNTL0_CS 0x000E0000
+#define BCM2835_AUX_SPI_CNTL0_POSTINPUT 0x00010000
+#define BCM2835_AUX_SPI_CNTL0_VAR_CS 0x00008000
+#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
+#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
+#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
+#define BCM2835_AUX_SPI_CNTL0_CPHA_IN 0x00000400
+#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
+#define BCM2835_AUX_SPI_CNTL0_CPHA_OUT 0x00000100
+#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
+#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
+#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
+
+/* Bitfields in CNTL1 */
+#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
+#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040
+#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
+#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
+
+/* Bitfields in STAT */
+#define BCM2835_AUX_SPI_STAT_TX_LVL 0xFF000000
+#define BCM2835_AUX_SPI_STAT_RX_LVL 0x00FF0000
+#define BCM2835_AUX_SPI_STAT_TX_FULL 0x00000400
+#define BCM2835_AUX_SPI_STAT_TX_EMPTY 0x00000200
+#define BCM2835_AUX_SPI_STAT_RX_FULL 0x00000100
+#define BCM2835_AUX_SPI_STAT_RX_EMPTY 0x00000080
+#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
+#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
+
+/* timeout values */
+#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
+#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
+
+#define BCM2835_AUX_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ | SPI_NO_CS)
+
+struct bcm2835aux_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ u32 cntl[2];
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int tx_len;
+ int rx_len;
+ int pending;
+};
+
+static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
+ u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
+{
+ u32 data;
+ int count = min(bs->rx_len, 3);
+
+ data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
+ if (bs->rx_buf) {
+ switch (count) {
+ case 4:
+ *bs->rx_buf++ = (data >> 24) & 0xff;
+ /* fallthrough */
+ case 3:
+ *bs->rx_buf++ = (data >> 16) & 0xff;
+ /* fallthrough */
+ case 2:
+ *bs->rx_buf++ = (data >> 8) & 0xff;
+ /* fallthrough */
+ case 1:
+ *bs->rx_buf++ = (data >> 0) & 0xff;
+ /* fallthrough - no default */
+ }
+ }
+ bs->rx_len -= count;
+ bs->pending -= count;
+}
+
+static inline void bcm2835aux_wr_fifo(struct bcm2835aux_spi *bs)
+{
+ u32 data;
+ u8 byte;
+ int count;
+ int i;
+
+ /* gather up to 3 bytes to write to the FIFO */
+ count = min(bs->tx_len, 3);
+ data = 0;
+ for (i = 0; i < count; i++) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ data |= byte << (8 * (2 - i));
+ }
+
+ /* and set the variable bit-length */
+ data |= (count * 8) << 24;
+
+ /* and decrement length */
+ bs->tx_len -= count;
+ bs->pending += count;
+
+ /* write to the correct TX-register */
+ if (bs->tx_len)
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_TXHOLD, data);
+ else
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_IO, data);
+}
+
+static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
+{
+ /* disable spi clearing fifo and interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, 0);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0,
+ BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
+}
+
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ irqreturn_t ret = IRQ_NONE;
+
+ /* check if we have data to read */
+ while (bs->rx_len &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+ bcm2835aux_rd_fifo(bs);
+ ret = IRQ_HANDLED;
+ }
+
+ /* check if we have data to write */
+ while (bs->tx_len &&
+ (bs->pending < 12) &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ ret = IRQ_HANDLED;
+ }
+
+ /* and check if we have reached "done" */
+ while (bs->rx_len &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_BUSY))) {
+ bcm2835aux_rd_fifo(bs);
+ ret = IRQ_HANDLED;
+ }
+
+ /* and if rx_len is 0 then wake up completion and disable spi */
+ if (!bs->rx_len) {
+ bcm2835aux_spi_reset_hw(bs);
+ complete(&master->xfer_completion);
+ }
+
+ /* and return */
+ return ret;
+}
+
+static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* enable interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+ BCM2835_AUX_SPI_CNTL1_TXEMPTY |
+ BCM2835_AUX_SPI_CNTL1_IDLE);
+
+ /* and wait for finish... */
+ return 1;
+}
+
+static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* fill in registers and fifos before enabling interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ /* fill in tx fifo with data before enabling interrupts */
+ while ((bs->tx_len) &&
+ (bs->pending < 12) &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ }
+
+ /* now run the interrupt mode */
+ return __bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+}
+
+static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long timeout;
+ u32 stat;
+
+ /* configure spi */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ /* set the timeout */
+ timeout = jiffies + BCM2835_AUX_SPI_POLLING_JIFFIES;
+
+ /* loop until finished the transfer */
+ while (bs->rx_len) {
+ /* read status */
+ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
+
+ /* fill in tx fifo with remaining data */
+ if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ continue;
+ }
+
+ /* read data from fifo for both cases */
+ if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
+ bcm2835aux_rd_fifo(bs);
+ continue;
+ }
+ if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
+ bcm2835aux_rd_fifo(bs);
+ continue;
+ }
+
+ /* there is still data pending to read check the timeout */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+ dev_dbg_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
+ jiffies - timeout,
+ bs->tx_len, bs->rx_len);
+ /* forward to interrupt handler */
+ return __bcm2835aux_spi_transfer_one_irq(master,
+ spi, tfr);
+ }
+ }
+
+ /* Transfer complete - reset SPI HW */
+ bcm2835aux_spi_reset_hw(bs);
+
+ /* and return without waiting for completion */
+ return 0;
+}
+
+static int bcm2835aux_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long spi_hz, clk_hz, speed;
+ unsigned long spi_used_hz;
+ unsigned long long xfer_time_us;
+
+ /* calculate the registers to handle
+ *
+ * note that we use the variable data mode, which
+ * is not optimal for longer transfers as we waste registers
+ * resulting (potentially) in more interrupts when transferring
+ * more than 12 bytes
+ */
+ bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
+ BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
+ BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
+ bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
+
+ /* set clock */
+ spi_hz = tfr->speed_hz;
+ clk_hz = clk_get_rate(bs->clk);
+
+ if (spi_hz >= clk_hz / 2) {
+ speed = 0;
+ } else if (spi_hz) {
+ speed = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
+ if (speed > BCM2835_AUX_SPI_CNTL0_SPEED_MAX)
+ speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
+ } else { /* the slowest we can go */
+ speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
+ }
+ bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
+
+ spi_used_hz = clk_hz / (2 * (speed + 1));
+
+ /* handle all the modes */
+ if (spi->mode & SPI_CPOL)
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
+ if (spi->mode & SPI_CPHA)
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPHA_OUT |
+ BCM2835_AUX_SPI_CNTL0_CPHA_IN;
+
+ /* set transmit buffers and length */
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->tx_len = tfr->len;
+ bs->rx_len = tfr->len;
+ bs->pending = 0;
+
+ /* calculate the estimated time in us the transfer runs
+ * note that there are 2 idle clocks after each
+ * chunk getting transferred - in our case the chunk size
+ * is 3 bytes, so we approximate this by 9 bits/byte
+ */
+ xfer_time_us = tfr->len * 9 * 1000000;
+ do_div(xfer_time_us, spi_used_hz);
+
+ /* run in polling mode for short transfers */
+ if (xfer_time_us < BCM2835_AUX_SPI_POLLING_LIMIT_US)
+ return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
+
+ /* run in interrupt mode for all others */
+ return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+}
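As a quick cross-check of the divider and time-estimate math above, here is a standalone sketch - not part of the patch; the 250 MHz system clock and the 0xfff speed-field width are assumed values - that reproduces the speed and xfer_time_us calculations:

/* illustrative only - mirrors the bcm2835aux divider and time estimate */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SPEED_MAX		0xfff	/* assumed width of the CNTL0 speed field */

int main(void)
{
	unsigned long clk_hz = 250000000;	/* assumed system clock */
	unsigned long spi_hz = 10000000;	/* requested transfer speed */
	unsigned long len = 4;			/* transfer length in bytes */
	unsigned long speed, spi_used_hz;
	unsigned long long xfer_time_us;

	speed = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
	if (speed > SPEED_MAX)
		speed = SPEED_MAX;
	spi_used_hz = clk_hz / (2 * (speed + 1));

	/* ~9 bits/byte approximates 8 data bits plus 2 idle clocks per 3-byte chunk */
	xfer_time_us = (unsigned long long)len * 9 * 1000000 / spi_used_hz;

	printf("speed field %lu -> %lu Hz, estimated %llu us\n",
	       speed, spi_used_hz, xfer_time_us);
	return 0;
}

With these numbers the estimate comes out at about 3 us, well below any sensible polling limit, so such a transfer would take the polling path rather than the interrupt path.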
+
+static void bcm2835aux_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+}
+
+static int bcm2835aux_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm2835aux_spi *bs;
+ struct resource *res;
+ unsigned long clk_hz;
+ int err;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ master->mode_bits = BCM2835_AUX_SPI_MODE_BITS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->num_chipselect = -1;
+ master->transfer_one = bcm2835aux_spi_transfer_one;
+ master->handle_err = bcm2835aux_spi_handle_err;
+ master->dev.of_node = pdev->dev.of_node;
+
+ bs = spi_master_get_devdata(master);
+
+ /* the main area */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bs->regs)) {
+ err = PTR_ERR(bs->regs);
+ goto out_master_put;
+ }
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if ((!bs->clk) || (IS_ERR(bs->clk))) {
+ err = PTR_ERR(bs->clk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ goto out_master_put;
+ }
+
+ bs->irq = platform_get_irq(pdev, 0);
+ if (bs->irq <= 0) {
+ dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
+ err = bs->irq ? bs->irq : -ENODEV;
+ goto out_master_put;
+ }
+
+ /* this also enables the HW block */
+ err = clk_prepare_enable(bs->clk);
+ if (err) {
+ dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
+ goto out_master_put;
+ }
+
+ /* just checking if the clock returns a sane value */
+ clk_hz = clk_get_rate(bs->clk);
+ if (!clk_hz) {
+ dev_err(&pdev->dev, "clock returns 0 Hz\n");
+ err = -ENODEV;
+ goto out_clk_disable;
+ }
+
+ /* reset SPI-HW block */
+ bcm2835aux_spi_reset_hw(bs);
+
+ err = devm_request_irq(&pdev->dev, bs->irq,
+ bcm2835aux_spi_interrupt,
+ IRQF_SHARED,
+ dev_name(&pdev->dev), master);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+out_master_put:
+ spi_master_put(master);
+ return err;
+}
+
+static int bcm2835aux_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+
+ /* disable the HW block by releasing the clock */
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835aux_spi_match[] = {
+ { .compatible = "brcm,bcm2835-aux-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835aux_spi_match);
+
+static struct platform_driver bcm2835aux_spi_driver = {
+ .driver = {
+ .name = "spi-bcm2835aux",
+ .of_match_table = bcm2835aux_spi_match,
+ },
+ .probe = bcm2835aux_spi_probe,
+ .remove = bcm2835aux_spi_remove,
+};
+module_platform_driver(bcm2835aux_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835 aux");
+MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 1520554978a3..cc3f938f0a6b 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -247,28 +247,19 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
if (err) {
spi_master_put(master);
bcma_set_drvdata(core, NULL);
- goto out;
+ return err;
}
/* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */
spi_new_device(master, &bcm53xx_info);
-out:
- return err;
-}
-
-static void bcm53xxspi_bcma_remove(struct bcma_device *core)
-{
- struct bcm53xxspi *b53spi = bcma_get_drvdata(core);
-
- spi_unregister_master(b53spi->master);
+ return 0;
}
static struct bcma_driver bcm53xxspi_bcma_driver = {
.name = KBUILD_MODNAME,
.id_table = bcm53xxspi_bcma_tbl,
.probe = bcm53xxspi_bcma_probe,
- .remove = bcm53xxspi_bcma_remove,
};
/**************************************************
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index e73e2b052c9c..06858e04ec59 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -27,10 +27,117 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
-#include <bcm63xx_dev_spi.h>
+/* BCM 6338/6348 SPI core */
+#define SPI_6348_RSET_SIZE 64
+#define SPI_6348_CMD 0x00 /* 16-bit register */
+#define SPI_6348_INT_STATUS 0x02
+#define SPI_6348_INT_MASK_ST 0x03
+#define SPI_6348_INT_MASK 0x04
+#define SPI_6348_ST 0x05
+#define SPI_6348_CLK_CFG 0x06
+#define SPI_6348_FILL_BYTE 0x07
+#define SPI_6348_MSG_TAIL 0x09
+#define SPI_6348_RX_TAIL 0x0b
+#define SPI_6348_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6348_MSG_CTL_WIDTH 8
+#define SPI_6348_MSG_DATA 0x41
+#define SPI_6348_MSG_DATA_SIZE 0x3f
+#define SPI_6348_RX_DATA 0x80
+#define SPI_6348_RX_DATA_SIZE 0x3f
+
+/* BCM 3368/6358/6262/6368 SPI core */
+#define SPI_6358_RSET_SIZE 1804
+#define SPI_6358_MSG_CTL 0x00 /* 16-bit register */
+#define SPI_6358_MSG_CTL_WIDTH 16
+#define SPI_6358_MSG_DATA 0x02
+#define SPI_6358_MSG_DATA_SIZE 0x21e
+#define SPI_6358_RX_DATA 0x400
+#define SPI_6358_RX_DATA_SIZE 0x220
+#define SPI_6358_CMD 0x700 /* 16-bit register */
+#define SPI_6358_INT_STATUS 0x702
+#define SPI_6358_INT_MASK_ST 0x703
+#define SPI_6358_INT_MASK 0x704
+#define SPI_6358_ST 0x705
+#define SPI_6358_CLK_CFG 0x706
+#define SPI_6358_FILL_BYTE 0x707
+#define SPI_6358_MSG_TAIL 0x709
+#define SPI_6358_RX_TAIL 0x70b
+
+/* Shared SPI definitions */
+
+/* Message configuration */
+#define SPI_FD_RW 0x00
+#define SPI_HD_W 0x01
+#define SPI_HD_R 0x02
+#define SPI_BYTE_CNT_SHIFT 0
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+
+/* Command */
+#define SPI_CMD_NOOP 0x00
+#define SPI_CMD_SOFT_RESET 0x01
+#define SPI_CMD_HARD_RESET 0x02
+#define SPI_CMD_START_IMMEDIATE 0x03
+#define SPI_CMD_COMMAND_SHIFT 0
+#define SPI_CMD_COMMAND_MASK 0x000f
+#define SPI_CMD_DEVICE_ID_SHIFT 4
+#define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+#define SPI_CMD_ONE_BYTE_SHIFT 11
+#define SPI_CMD_ONE_WIRE_SHIFT 12
+#define SPI_DEV_ID_0 0
+#define SPI_DEV_ID_1 1
+#define SPI_DEV_ID_2 2
+#define SPI_DEV_ID_3 3
+
+/* Interrupt mask */
+#define SPI_INTR_CMD_DONE 0x01
+#define SPI_INTR_RX_OVERFLOW 0x02
+#define SPI_INTR_TX_UNDERFLOW 0x04
+#define SPI_INTR_TX_OVERFLOW 0x08
+#define SPI_INTR_RX_UNDERFLOW 0x10
+#define SPI_INTR_CLEAR_ALL 0x1f
+
+/* Status */
+#define SPI_RX_EMPTY 0x02
+#define SPI_CMD_BUSY 0x04
+#define SPI_SERIAL_BUSY 0x08
+
+/* Clock configuration */
+#define SPI_CLK_20MHZ 0x00
+#define SPI_CLK_0_391MHZ 0x01
+#define SPI_CLK_0_781MHZ 0x02 /* default */
+#define SPI_CLK_1_563MHZ 0x03
+#define SPI_CLK_3_125MHZ 0x04
+#define SPI_CLK_6_250MHZ 0x05
+#define SPI_CLK_12_50MHZ 0x06
+#define SPI_CLK_MASK 0x07
+#define SPI_SSOFFTIME_MASK 0x38
+#define SPI_SSOFFTIME_SHIFT 3
+#define SPI_BYTE_SWAP 0x80
+
+enum bcm63xx_regs_spi {
+ SPI_CMD,
+ SPI_INT_STATUS,
+ SPI_INT_MASK_ST,
+ SPI_INT_MASK,
+ SPI_ST,
+ SPI_CLK_CFG,
+ SPI_FILL_BYTE,
+ SPI_MSG_TAIL,
+ SPI_RX_TAIL,
+ SPI_MSG_CTL,
+ SPI_MSG_DATA,
+ SPI_RX_DATA,
+ SPI_MSG_TYPE_SHIFT,
+ SPI_MSG_CTL_WIDTH,
+ SPI_MSG_DATA_SIZE,
+};
#define BCM63XX_SPI_MAX_PREPEND 15
+#define BCM63XX_SPI_MAX_CS 8
+#define BCM63XX_SPI_BUS_NUM 0
+
struct bcm63xx_spi {
struct completion done;
@@ -38,6 +145,7 @@ struct bcm63xx_spi {
int irq;
/* Platform data */
+ const unsigned long *reg_offsets;
unsigned fifo_size;
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
@@ -51,27 +159,35 @@ struct bcm63xx_spi {
};
static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
- unsigned int offset)
+ unsigned int offset)
{
- return bcm_readb(bs->regs + bcm63xx_spireg(offset));
+ return readb(bs->regs + bs->reg_offsets[offset]);
}
static inline u16 bcm_spi_readw(struct bcm63xx_spi *bs,
unsigned int offset)
{
- return bcm_readw(bs->regs + bcm63xx_spireg(offset));
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ return ioread16be(bs->regs + bs->reg_offsets[offset]);
+#else
+ return readw(bs->regs + bs->reg_offsets[offset]);
+#endif
}
static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
u8 value, unsigned int offset)
{
- bcm_writeb(value, bs->regs + bcm63xx_spireg(offset));
+ writeb(value, bs->regs + bs->reg_offsets[offset]);
}
static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
u16 value, unsigned int offset)
{
- bcm_writew(value, bs->regs + bcm63xx_spireg(offset));
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ iowrite16be(value, bs->regs + bs->reg_offsets[offset]);
+#else
+ writew(value, bs->regs + bs->reg_offsets[offset]);
+#endif
}
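The accessors above hide the layout differences between the two core revisions behind a per-variant offset table indexed by a shared enum. A minimal, self-contained sketch of the same pattern follows; the variant names, offsets, and register window are made up for illustration and are not the driver's:

/* illustrative sketch of the per-variant register offset table pattern */
#include <stdio.h>

enum regs { REG_CMD, REG_STATUS, REG_MAX };

static const unsigned long variant_a_offsets[REG_MAX] = {
	[REG_CMD]    = 0x00,
	[REG_STATUS] = 0x05,
};

static const unsigned long variant_b_offsets[REG_MAX] = {
	[REG_CMD]    = 0x700,
	[REG_STATUS] = 0x705,
};

struct ctrl {
	unsigned char *base;			/* mapped register window */
	const unsigned long *reg_offsets;	/* picked once at probe time */
};

static unsigned char ctrl_readb(struct ctrl *c, enum regs r)
{
	return c->base[c->reg_offsets[r]];	/* stand-in for readb() */
}

int main(void)
{
	static unsigned char mmio[0x800];
	struct ctrl c = { .base = mmio, .reg_offsets = variant_b_offsets };

	mmio[0x705] = 0x02;	/* pretend the status register reads back 0x02 */
	printf("status = 0x%02x\n", ctrl_readb(&c, REG_STATUS));
	return 0;
}

The driver does the same thing one level up: probe() picks bcm6348_spi_reg_offsets or bcm6358_spi_reg_offsets from the platform id table, and every register access goes through that single indirection.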
static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
@@ -122,7 +238,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u16 msg_ctl;
u16 cmd;
- u8 rx_tail;
unsigned int i, timeout = 0, prepend_len = 0, len = 0;
struct spi_transfer *t = first;
bool do_rx = false;
@@ -314,18 +429,71 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static const unsigned long bcm6348_spi_reg_offsets[] = {
+ [SPI_CMD] = SPI_6348_CMD,
+ [SPI_INT_STATUS] = SPI_6348_INT_STATUS,
+ [SPI_INT_MASK_ST] = SPI_6348_INT_MASK_ST,
+ [SPI_INT_MASK] = SPI_6348_INT_MASK,
+ [SPI_ST] = SPI_6348_ST,
+ [SPI_CLK_CFG] = SPI_6348_CLK_CFG,
+ [SPI_FILL_BYTE] = SPI_6348_FILL_BYTE,
+ [SPI_MSG_TAIL] = SPI_6348_MSG_TAIL,
+ [SPI_RX_TAIL] = SPI_6348_RX_TAIL,
+ [SPI_MSG_CTL] = SPI_6348_MSG_CTL,
+ [SPI_MSG_DATA] = SPI_6348_MSG_DATA,
+ [SPI_RX_DATA] = SPI_6348_RX_DATA,
+ [SPI_MSG_TYPE_SHIFT] = SPI_6348_MSG_TYPE_SHIFT,
+ [SPI_MSG_CTL_WIDTH] = SPI_6348_MSG_CTL_WIDTH,
+ [SPI_MSG_DATA_SIZE] = SPI_6348_MSG_DATA_SIZE,
+};
+
+static const unsigned long bcm6358_spi_reg_offsets[] = {
+ [SPI_CMD] = SPI_6358_CMD,
+ [SPI_INT_STATUS] = SPI_6358_INT_STATUS,
+ [SPI_INT_MASK_ST] = SPI_6358_INT_MASK_ST,
+ [SPI_INT_MASK] = SPI_6358_INT_MASK,
+ [SPI_ST] = SPI_6358_ST,
+ [SPI_CLK_CFG] = SPI_6358_CLK_CFG,
+ [SPI_FILL_BYTE] = SPI_6358_FILL_BYTE,
+ [SPI_MSG_TAIL] = SPI_6358_MSG_TAIL,
+ [SPI_RX_TAIL] = SPI_6358_RX_TAIL,
+ [SPI_MSG_CTL] = SPI_6358_MSG_CTL,
+ [SPI_MSG_DATA] = SPI_6358_MSG_DATA,
+ [SPI_RX_DATA] = SPI_6358_RX_DATA,
+ [SPI_MSG_TYPE_SHIFT] = SPI_6358_MSG_TYPE_SHIFT,
+ [SPI_MSG_CTL_WIDTH] = SPI_6358_MSG_CTL_WIDTH,
+ [SPI_MSG_DATA_SIZE] = SPI_6358_MSG_DATA_SIZE,
+};
+
+static const struct platform_device_id bcm63xx_spi_dev_match[] = {
+ {
+ .name = "bcm6348-spi",
+ .driver_data = (unsigned long)bcm6348_spi_reg_offsets,
+ },
+ {
+ .name = "bcm6358-spi",
+ .driver_data = (unsigned long)bcm6358_spi_reg_offsets,
+ },
+ {
+ },
+};
static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
+ const unsigned long *bcm63xx_spireg;
struct device *dev = &pdev->dev;
- struct bcm63xx_spi_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq;
struct spi_master *master;
struct clk *clk;
struct bcm63xx_spi *bs;
int ret;
+ if (!pdev->id_entry->driver_data)
+ return -EINVAL;
+
+ bcm63xx_spireg = (const unsigned long *)pdev->id_entry->driver_data;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq\n");
@@ -359,7 +527,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
bs->irq = irq;
bs->clk = clk;
- bs->fifo_size = pdata->fifo_size;
+ bs->reg_offsets = bcm63xx_spireg;
+ bs->fifo_size = bs->reg_offsets[SPI_MSG_DATA_SIZE];
ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
pdev->name, master);
@@ -368,26 +537,16 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
goto out_err;
}
- master->bus_num = pdata->bus_num;
- master->num_chipselect = pdata->num_chipselect;
+ master->bus_num = BCM63XX_SPI_BUS_NUM;
+ master->num_chipselect = BCM63XX_SPI_MAX_CS;
master->transfer_one_message = bcm63xx_spi_transfer_one;
master->mode_bits = MODEBITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->auto_runtime_pm = true;
- bs->msg_type_shift = pdata->msg_type_shift;
- bs->msg_ctl_width = pdata->msg_ctl_width;
- bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
- bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
-
- switch (bs->msg_ctl_width) {
- case 8:
- case 16:
- break;
- default:
- dev_err(dev, "unsupported MSG_CTL width: %d\n",
- bs->msg_ctl_width);
- goto out_err;
- }
+ bs->msg_type_shift = bs->reg_offsets[SPI_MSG_TYPE_SHIFT];
+ bs->msg_ctl_width = bs->reg_offsets[SPI_MSG_CTL_WIDTH];
+ bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]);
+ bs->rx_io = (const u8 *)(bs->regs + bs->reg_offsets[SPI_RX_DATA]);
/* Initialize hardware */
ret = clk_prepare_enable(bs->clk);
@@ -467,6 +626,7 @@ static struct platform_driver bcm63xx_spi_driver = {
.name = "bcm63xx-spi",
.pm = &bcm63xx_spi_pm_ops,
},
+ .id_table = bcm63xx_spi_dev_match,
.probe = bcm63xx_spi_probe,
.remove = bcm63xx_spi_remove,
};
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index a78693189f45..6c967555a56a 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -352,10 +352,7 @@ bfin_sport_spi_pump_transfers(unsigned long data)
transfer = drv_data->cur_transfer;
chip = drv_data->cur_chip;
- if (transfer->speed_hz)
- transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
- else
- transfer_speed = chip->baud;
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
SSYNC();
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index a3d65b4f4944..1e91325bf39c 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -661,11 +661,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
message->state = RUNNING_STATE;
dma_config = 0;
- /* Speed setup (surely valid because already checked) */
- if (transfer->speed_hz)
- bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
- else
- bfin_write(&drv_data->regs->baud, chip->baud);
+ bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
bfin_spi_cs_active(drv_data, chip);
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 840a4984d365..3aa9e6e3dac8 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -24,6 +24,8 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#define SPI_BITBANG_CS_DELAY 100
+
/*----------------------------------------------------------------------*/
@@ -180,7 +182,6 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
- unsigned long flags;
bitbang = spi_master_get_devdata(spi->master);
@@ -210,12 +211,12 @@ int spi_bitbang_setup(struct spi_device *spi)
*/
/* deselect chip (low or high) */
- spin_lock_irqsave(&bitbang->lock, flags);
+ mutex_lock(&bitbang->lock);
if (!bitbang->busy) {
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(cs->nsecs);
}
- spin_unlock_irqrestore(&bitbang->lock, flags);
+ mutex_unlock(&bitbang->lock);
return 0;
}
@@ -255,122 +256,39 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
- unsigned long flags;
bitbang = spi_master_get_devdata(spi);
- spin_lock_irqsave(&bitbang->lock, flags);
+ mutex_lock(&bitbang->lock);
bitbang->busy = 1;
- spin_unlock_irqrestore(&bitbang->lock, flags);
+ mutex_unlock(&bitbang->lock);
return 0;
}
static int spi_bitbang_transfer_one(struct spi_master *master,
- struct spi_message *m)
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
{
- struct spi_bitbang *bitbang;
- unsigned nsecs;
- struct spi_transfer *t = NULL;
- unsigned cs_change;
- int status;
- int do_setup = -1;
- struct spi_device *spi = m->spi;
-
- bitbang = spi_master_get_devdata(master);
-
- /* FIXME this is made-up ... the correct value is known to
- * word-at-a-time bitbang code, and presumably chipselect()
- * should enforce these requirements too?
- */
- nsecs = 100;
-
- cs_change = 1;
- status = 0;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
-
- /* override speed or wordsize? */
- if (t->speed_hz || t->bits_per_word)
- do_setup = 1;
-
- /* init (-1) or override (1) transfer params */
- if (do_setup != 0) {
- if (bitbang->setup_transfer) {
- status = bitbang->setup_transfer(spi, t);
- if (status < 0)
- break;
- }
- if (do_setup == -1)
- do_setup = 0;
- }
-
- /* set up default clock polarity, and activate chip;
- * this implicitly updates clock and spi modes as
- * previously recorded for this device via setup().
- * (and also deselects any other chip that might be
- * selected ...)
- */
- if (cs_change) {
- bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
- ndelay(nsecs);
- }
- cs_change = t->cs_change;
- if (!t->tx_buf && !t->rx_buf && t->len) {
- status = -EINVAL;
- break;
- }
-
- /* transfer data. the lower level code handles any
- * new dma mappings it needs. our caller always gave
- * us dma-safe buffers.
- */
- if (t->len) {
- /* REVISIT dma API still needs a designated
- * DMA_ADDR_INVALID; ~0 might be better.
- */
- if (!m->is_dma_mapped)
- t->rx_dma = t->tx_dma = 0;
- status = bitbang->txrx_bufs(spi, t);
- }
- if (status > 0)
- m->actual_length += status;
- if (status != t->len) {
- /* always report some kind of error */
- if (status >= 0)
- status = -EREMOTEIO;
- break;
- }
- status = 0;
+ struct spi_bitbang *bitbang = spi_master_get_devdata(master);
+ int status = 0;
- /* protocol tweaks before next transfer */
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (cs_change &&
- !list_is_last(&t->transfer_list, &m->transfers)) {
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
+ if (bitbang->setup_transfer) {
+ status = bitbang->setup_transfer(spi, transfer);
+ if (status < 0)
+ goto out;
}
- m->status = status;
+ if (transfer->len)
+ status = bitbang->txrx_bufs(spi, transfer);
- /* normally deactivate chipselect ... unless no error and
- * cs_change has hinted that the next message will probably
- * be for this chip too.
- */
- if (!(status == 0 && cs_change)) {
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
+ if (status == transfer->len)
+ status = 0;
+ else if (status >= 0)
+ status = -EREMOTEIO;
- spi_finalize_current_message(master);
+out:
+ spi_finalize_current_transfer(master);
return status;
}
@@ -378,17 +296,32 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
- unsigned long flags;
bitbang = spi_master_get_devdata(spi);
- spin_lock_irqsave(&bitbang->lock, flags);
+ mutex_lock(&bitbang->lock);
bitbang->busy = 0;
- spin_unlock_irqrestore(&bitbang->lock, flags);
+ mutex_unlock(&bitbang->lock);
return 0;
}
+static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_bitbang *bitbang = spi_master_get_devdata(spi->master);
+
+ /* the SPI core provides the CS line level (high / low), but
+ * the bitbang driver expects CS as active / inactive;
+ * the SPI device driver takes care of handling SPI_CS_HIGH
+ */
+ enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
+
+ ndelay(SPI_BITBANG_CS_DELAY);
+ bitbang->chipselect(spi, enable ? BITBANG_CS_ACTIVE :
+ BITBANG_CS_INACTIVE);
+ ndelay(SPI_BITBANG_CS_DELAY);
+}
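The conversion between the CS line level handed in by the core and the active / inactive flag expected by the legacy chipselect() callback is the single comparison against SPI_CS_HIGH above. A throwaway truth-table sketch - illustrative only, not driver code - makes the mapping explicit:

/* truth table for the CS-level to active/inactive conversion (illustrative) */
#include <stdio.h>

int main(void)
{
	for (int cs_high = 0; cs_high <= 1; cs_high++)
		for (int level = 0; level <= 1; level++) {
			/* same test as in spi_bitbang_set_cs() */
			int active = (!!cs_high == !!level);

			printf("SPI_CS_HIGH=%d line level=%d -> %s\n",
			       cs_high, level,
			       active ? "BITBANG_CS_ACTIVE"
				      : "BITBANG_CS_INACTIVE");
		}
	return 0;
}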
+
/*----------------------------------------------------------------------*/
/**
@@ -427,7 +360,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
if (!master || !bitbang->chipselect)
return -EINVAL;
- spin_lock_init(&bitbang->lock);
+ mutex_init(&bitbang->lock);
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
@@ -437,7 +370,8 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
- master->transfer_one_message = spi_bitbang_transfer_one;
+ master->transfer_one = spi_bitbang_transfer_one;
+ master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 688956ff5095..23f6fffd75e1 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -420,19 +420,20 @@ static int mcfqspi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
+ pm_runtime_enable(&pdev->dev);
status = devm_spi_register_master(&pdev->dev, master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
goto fail2;
}
- pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
fail2:
+ pm_runtime_disable(&pdev->dev);
mcfqspi_cs_teardown(mcfqspi);
fail1:
clk_disable(mcfqspi->clk);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index a85d863d4a44..7d3af3eacf57 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -215,18 +215,10 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
struct davinci_spi_config *spicfg = spi->controller_data;
u8 chip_sel = spi->chip_select;
u16 spidat1 = CS_DEFAULT;
- bool gpio_chipsel = false;
- int gpio;
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
- if (spi->cs_gpio >= 0) {
- /* SPI core parse and update master->cs_gpio */
- gpio_chipsel = true;
- gpio = spi->cs_gpio;
- }
-
/* program delay transfers if tx_delay is non zero */
if (spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
@@ -235,11 +227,12 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
* Board specific chip select logic decides the polarity and cs
* line for the controller
*/
- if (gpio_chipsel) {
+ if (spi->cs_gpio >= 0) {
if (value == BITBANG_CS_ACTIVE)
- gpio_set_value(gpio, spi->mode & SPI_CS_HIGH);
+ gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH);
else
- gpio_set_value(gpio, !(spi->mode & SPI_CS_HIGH));
+ gpio_set_value(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
} else {
if (value == BITBANG_CS_ACTIVE) {
spidat1 |= SPIDAT1_CSHOLD_MASK;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 7edede6e024b..a6d7029a85ac 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
+#include <linux/property.h>
#include "spi-dw.h"
@@ -74,13 +75,11 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->max_freq = clk_get_rate(dwsmmio->clk);
- of_property_read_u32(pdev->dev.of_node, "reg-io-width",
- &dws->reg_io_width);
+ device_property_read_u32(&pdev->dev, "reg-io-width", &dws->reg_io_width);
num_cs = 4;
- if (pdev->dev.of_node)
- of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ device_property_read_u32(&pdev->dev, "num-cs", &num_cs);
dws->num_cs = num_cs;
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 6d331e0db331..332ccb0539a7 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -23,11 +23,6 @@
#define DRIVER_NAME "dw_spi_pci"
-struct dw_spi_pci {
- struct pci_dev *pdev;
- struct dw_spi dws;
-};
-
struct spi_pci_desc {
int (*setup)(struct dw_spi *);
u16 num_cs;
@@ -48,7 +43,6 @@ static struct spi_pci_desc spi_pci_mid_desc_2 = {
static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct dw_spi_pci *dwpci;
struct dw_spi *dws;
struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data;
int pci_bar = 0;
@@ -58,14 +52,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- dwpci = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_pci),
- GFP_KERNEL);
- if (!dwpci)
+ dws = devm_kzalloc(&pdev->dev, sizeof(*dws), GFP_KERNEL);
+ if (!dws)
return -ENOMEM;
- dwpci->pdev = pdev;
- dws = &dwpci->dws;
-
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
@@ -74,7 +64,6 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
dws->regs = pcim_iomap_table(pdev)[pci_bar];
-
dws->irq = pdev->irq;
/*
@@ -99,7 +88,7 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
/* PCI hook and SPI hook use the same drv data */
- pci_set_drvdata(pdev, dwpci);
+ pci_set_drvdata(pdev, dws);
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
@@ -109,26 +98,26 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void spi_pci_remove(struct pci_dev *pdev)
{
- struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ struct dw_spi *dws = pci_get_drvdata(pdev);
- dw_spi_remove_host(&dwpci->dws);
+ dw_spi_remove_host(dws);
}
#ifdef CONFIG_PM_SLEEP
static int spi_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ struct dw_spi *dws = pci_get_drvdata(pdev);
- return dw_spi_suspend_host(&dwpci->dws);
+ return dw_spi_suspend_host(dws);
}
static int spi_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ struct dw_spi *dws = pci_get_drvdata(pdev);
- return dw_spi_resume_host(&dwpci->dws);
+ return dw_spi_resume_host(dws);
}
#endif
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 4fbfcdc5cb24..882cd6618cd5 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -30,19 +30,13 @@
/* Slave spi_dev related */
struct chip_data {
- u16 cr0;
u8 cs; /* chip select pin */
- u8 n_bytes; /* current is a 1/2/4 byte op */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u32 dma_width;
- u32 rx_threshold;
- u32 tx_threshold;
u8 enable_dma;
- u8 bits_per_word;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
@@ -289,14 +283,11 @@ static int dw_spi_transfer_one(struct spi_master *master,
struct chip_data *chip = spi_get_ctldata(spi);
u8 imask = 0;
u16 txlevel = 0;
- u16 clk_div = 0;
- u32 speed = 0;
- u32 cr0 = 0;
+ u16 clk_div;
+ u32 cr0;
int ret;
dws->dma_mapped = 0;
- dws->n_bytes = chip->n_bytes;
- dws->dma_width = chip->dma_width;
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
@@ -306,37 +297,30 @@ static int dw_spi_transfer_one(struct spi_master *master,
spi_enable_chip(dws, 0);
- cr0 = chip->cr0;
-
/* Handle per transfer options for bpw and speed */
- if (transfer->speed_hz) {
- speed = chip->speed_hz;
-
- if ((transfer->speed_hz != speed) || !chip->clk_div) {
- speed = transfer->speed_hz;
-
- /* clk_div doesn't support odd number */
- clk_div = (dws->max_freq / speed + 1) & 0xfffe;
+ if (transfer->speed_hz != chip->speed_hz) {
+ /* clk_div doesn't support odd number */
+ clk_div = (dws->max_freq / transfer->speed_hz + 1) & 0xfffe;
- chip->speed_hz = speed;
- chip->clk_div = clk_div;
+ chip->speed_hz = transfer->speed_hz;
+ chip->clk_div = clk_div;
- spi_set_clk(dws, chip->clk_div);
- }
+ spi_set_clk(dws, chip->clk_div);
}
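The divider computed a few lines up must be even; the '+ 1' followed by the '& 0xfffe' mask keeps even quotients as they are and rounds odd quotients up to the next even value. A small standalone illustration, where the 100 MHz controller clock is an assumed figure:

/* how the DesignWare-style divider is forced to an even value (illustrative) */
#include <stdio.h>

static unsigned int even_clk_div(unsigned long max_freq, unsigned long speed_hz)
{
	/* even quotients are kept, odd quotients round up to the next even value */
	return (max_freq / speed_hz + 1) & 0xfffe;
}

int main(void)
{
	unsigned long max_freq = 100000000;	/* assumed controller clock */
	unsigned long rates[] = { 10000000, 11000000, 12500000 };

	for (int i = 0; i < 3; i++) {
		unsigned int div = even_clk_div(max_freq, rates[i]);

		printf("request %lu Hz -> divider %u -> actual %lu Hz\n",
		       rates[i], div, max_freq / div);
	}
	return 0;
}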
- if (transfer->bits_per_word) {
- if (transfer->bits_per_word == 8) {
- dws->n_bytes = 1;
- dws->dma_width = 1;
- } else if (transfer->bits_per_word == 16) {
- dws->n_bytes = 2;
- dws->dma_width = 2;
- }
- cr0 = (transfer->bits_per_word - 1)
- | (chip->type << SPI_FRF_OFFSET)
- | (spi->mode << SPI_MODE_OFFSET)
- | (chip->tmode << SPI_TMOD_OFFSET);
+ if (transfer->bits_per_word == 8) {
+ dws->n_bytes = 1;
+ dws->dma_width = 1;
+ } else if (transfer->bits_per_word == 16) {
+ dws->n_bytes = 2;
+ dws->dma_width = 2;
+ } else {
+ return -EINVAL;
}
+ /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+ cr0 = (transfer->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
/*
* Adjust transfer mode if necessary. Requires platform dependent
@@ -439,34 +423,9 @@ static int dw_spi_setup(struct spi_device *spi)
chip->poll_mode = chip_info->poll_mode;
chip->type = chip_info->type;
-
- chip->rx_threshold = 0;
- chip->tx_threshold = 0;
- }
-
- if (spi->bits_per_word == 8) {
- chip->n_bytes = 1;
- chip->dma_width = 1;
- } else if (spi->bits_per_word == 16) {
- chip->n_bytes = 2;
- chip->dma_width = 2;
- }
- chip->bits_per_word = spi->bits_per_word;
-
- if (!spi->max_speed_hz) {
- dev_err(&spi->dev, "No max speed HZ parameter\n");
- return -EINVAL;
}
chip->tmode = 0; /* Tx & Rx */
- /* Default SPI mode is SCPOL = 0, SCPH = 0 */
- chip->cr0 = (chip->bits_per_word - 1)
- | (chip->type << SPI_FRF_OFFSET)
- | (spi->mode << SPI_MODE_OFFSET)
- | (chip->tmode << SPI_TMOD_OFFSET);
-
- if (spi->mode & SPI_LOOP)
- chip->cr0 |= 1 << SPI_SRL_OFFSET;
if (gpio_is_valid(spi->cs_gpio)) {
ret = gpio_direction_output(spi->cs_gpio,
@@ -524,13 +483,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->master = master;
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
- dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+ dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
- ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
- dws->name, master);
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
if (ret < 0) {
- dev_err(&master->dev, "can not get IRQ\n");
+ dev_err(dev, "can not get IRQ\n");
goto err_free_master;
}
@@ -573,6 +531,7 @@ err_dma_exit:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
spi_enable_chip(dws, 0);
+ free_irq(dws->irq, master);
err_free_master:
spi_master_put(master);
return ret;
@@ -581,28 +540,27 @@ EXPORT_SYMBOL_GPL(dw_spi_add_host);
void dw_spi_remove_host(struct dw_spi *dws)
{
- if (!dws)
- return;
dw_spi_debugfs_remove(dws);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
- spi_enable_chip(dws, 0);
- /* Disable clk */
- spi_set_clk(dws, 0);
+
+ spi_shutdown_chip(dws);
+
+ free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
- int ret = 0;
+ int ret;
ret = spi_master_suspend(dws->master);
if (ret)
return ret;
- spi_enable_chip(dws, 0);
- spi_set_clk(dws, 0);
- return ret;
+
+ spi_shutdown_chip(dws);
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index b75ed327d5a2..35589a270468 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -225,6 +225,12 @@ static inline void spi_reset_chip(struct dw_spi *dws)
spi_enable_chip(dws, 1);
}
+static inline void spi_shutdown_chip(struct dw_spi *dws)
+{
+ spi_enable_chip(dws, 0);
+ spi_set_clk(dws, 0);
+}
+
/*
* Each SPI slave device to work with dw_api controller should
* has such a structure claiming its working mode (poll or PIO/DMA),
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 86bcdd68c1fe..59a11437db70 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -409,9 +409,6 @@ static int dspi_transfer_one_message(struct spi_master *master,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
dspi->cur_chip->ctar_val);
- if (transfer->speed_hz)
- regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
- dspi->cur_chip->ctar_val);
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f9deb84e4e55..0e5723ab47f0 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -336,13 +336,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
if (config->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
if (config->mode & SPI_CPOL) {
cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+ } else {
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
}
if (config->mode & SPI_CS_HIGH)
cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 1e75341689a6..c3ec46cd9f91 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -302,11 +302,9 @@ static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
cs_change = 1;
status = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- status = mpc512x_psc_spi_transfer_setup(spi, t);
- if (status < 0)
- break;
- }
+ status = mpc512x_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
if (cs_change)
mpc512x_psc_spi_activate_cs(spi);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index ecb6c58238c4..563954a61424 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
@@ -84,7 +85,8 @@ struct mtk_spi_compatible {
struct mtk_spi {
void __iomem *base;
u32 state;
- u32 pad_sel;
+ int pad_num;
+ u32 *pad_sel;
struct clk *parent_clk, *sel_clk, *spi_clk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
@@ -131,10 +133,28 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
writel(reg_val, mdata->base + SPI_CMD_REG);
}
-static void mtk_spi_config(struct mtk_spi *mdata,
- struct mtk_chip_config *chip_config)
+static int mtk_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
+ u16 cpha, cpol;
u32 reg_val;
+ struct spi_device *spi = msg->spi;
+ struct mtk_chip_config *chip_config = spi->controller_data;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (cpha)
+ reg_val |= SPI_CMD_CPHA;
+ else
+ reg_val &= ~SPI_CMD_CPHA;
+ if (cpol)
+ reg_val |= SPI_CMD_CPOL;
+ else
+ reg_val &= ~SPI_CMD_CPOL;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
reg_val = readl(mdata->base + SPI_CMD_REG);
@@ -170,38 +190,8 @@ static void mtk_spi_config(struct mtk_spi *mdata,
/* pad select */
if (mdata->dev_comp->need_pad_sel)
- writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
-}
-
-static int mtk_spi_prepare_message(struct spi_master *master,
- struct spi_message *msg)
-{
- u32 reg_val;
- u8 cpha, cpol;
- struct mtk_chip_config *chip_config;
- struct spi_device *spi = msg->spi;
- struct mtk_spi *mdata = spi_master_get_devdata(master);
-
- cpha = spi->mode & SPI_CPHA ? 1 : 0;
- cpol = spi->mode & SPI_CPOL ? 1 : 0;
-
- reg_val = readl(mdata->base + SPI_CMD_REG);
- if (cpha)
- reg_val |= SPI_CMD_CPHA;
- else
- reg_val &= ~SPI_CMD_CPHA;
- if (cpol)
- reg_val |= SPI_CMD_CPOL;
- else
- reg_val &= ~SPI_CMD_CPOL;
- writel(reg_val, mdata->base + SPI_CMD_REG);
-
- chip_config = spi->controller_data;
- if (!chip_config) {
- chip_config = (void *)&mtk_default_chip_info;
- spi->controller_data = chip_config;
- }
- mtk_spi_config(mdata, chip_config);
+ writel(mdata->pad_sel[spi->chip_select],
+ mdata->base + SPI_PAD_SEL_REG);
return 0;
}
@@ -413,6 +403,19 @@ static bool mtk_spi_can_dma(struct spi_master *master,
return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}
+static int mtk_spi_setup(struct spi_device *spi)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+
+ if (!spi->controller_data)
+ spi->controller_data = (void *)&mtk_default_chip_info;
+
+ if (mdata->dev_comp->need_pad_sel)
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ return 0;
+}
+
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
u32 cmd, reg_val, cnt;
@@ -484,7 +487,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
struct mtk_spi *mdata;
const struct of_device_id *of_id;
struct resource *res;
- int irq, ret;
+ int i, irq, ret;
master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
if (!master) {
@@ -500,6 +503,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->prepare_message = mtk_spi_prepare_message;
master->transfer_one = mtk_spi_transfer_one;
master->can_dma = mtk_spi_can_dma;
+ master->setup = mtk_spi_setup;
of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
if (!of_id) {
@@ -514,21 +518,34 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->flags = SPI_MASTER_MUST_TX;
if (mdata->dev_comp->need_pad_sel) {
- ret = of_property_read_u32(pdev->dev.of_node,
- "mediatek,pad-select",
- &mdata->pad_sel);
- if (ret) {
- dev_err(&pdev->dev, "failed to read pad select: %d\n",
- ret);
+ mdata->pad_num = of_property_count_u32_elems(
+ pdev->dev.of_node,
+ "mediatek,pad-select");
+ if (mdata->pad_num < 0) {
+ dev_err(&pdev->dev,
+ "No 'mediatek,pad-select' property\n");
+ ret = -EINVAL;
goto err_put_master;
}
- if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
- dev_err(&pdev->dev, "wrong pad-select: %u\n",
- mdata->pad_sel);
- ret = -EINVAL;
+ mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
+ sizeof(u32), GFP_KERNEL);
+ if (!mdata->pad_sel) {
+ ret = -ENOMEM;
goto err_put_master;
}
+
+ for (i = 0; i < mdata->pad_num; i++) {
+ of_property_read_u32_index(pdev->dev.of_node,
+ "mediatek,pad-select",
+ i, &mdata->pad_sel[i]);
+ if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
+ dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
+ i, mdata->pad_sel[i]);
+ ret = -EINVAL;
+ goto err_put_master;
+ }
+ }
}
platform_set_drvdata(pdev, master);
@@ -606,6 +623,26 @@ static int mtk_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
+ if (mdata->dev_comp->need_pad_sel) {
+ if (mdata->pad_num != master->num_chipselect) {
+ dev_err(&pdev->dev,
+ "pad_num does not match num_chipselect(%d != %d)\n",
+ mdata->pad_num, master->num_chipselect);
+ ret = -EINVAL;
+ goto err_put_master;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't get CS GPIO %i\n", i);
+ goto err_put_master;
+ }
+ }
+ }
+
return 0;
err_disable_clk:
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 76656a77ec12..b5911282a611 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -207,8 +207,7 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
unsigned int i;
- const __be32 *val;
- int len;
+ u32 val;
if (!np)
return 0;
@@ -226,13 +225,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
return -ENODEV;
}
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
- val = of_get_property(pdev->dev.of_node,
- "clock-frequency", &len);
- if (val && len >= sizeof(__be32))
- hw->freq = be32_to_cpup(val);
- val = of_get_property(pdev->dev.of_node, "baud-width", &len);
- if (val && len >= sizeof(__be32))
- hw->baudwidth = be32_to_cpup(val);
+ if (!of_property_read_u32(np, "clock-frequency", &val))
+ hw->freq = val;
+ if (!of_property_read_u32(np, "baud-width", &val))
+ hw->baudwidth = val;
return 0;
}
#else /* !CONFIG_OF */
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index e99d6a93d394..07e4ce8273df 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -65,7 +65,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
+ speed_hz = xfer->speed_hz;
clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 35b332dacb13..76a8425be227 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -244,12 +244,12 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
{
struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
struct omap1_spi100k_cs *cs = spi->controller_state;
- u8 word_len = spi->bits_per_word;
+ u8 word_len;
- if (t != NULL && t->bits_per_word)
+ if (t != NULL)
word_len = t->bits_per_word;
- if (!word_len)
- word_len = 8;
+ else
+ word_len = spi->bits_per_word;
if (spi->bits_per_word > 32)
return -EINVAL;
@@ -302,7 +302,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
struct spi_device *spi = m->spi;
struct spi_transfer *t = NULL;
int cs_active = 0;
- int par_override = 0;
int status = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -310,14 +309,9 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
status = -EINVAL;
break;
}
- if (par_override || t->speed_hz || t->bits_per_word) {
- par_override = 1;
- status = omap1_spi100k_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (!t->speed_hz && !t->bits_per_word)
- par_override = 0;
- }
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
+ break;
if (!cs_active) {
omap1_spi100k_force_cs(spi100k, 1);
@@ -347,11 +341,7 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
}
}
- /* Restore defaults if they were overriden */
- if (par_override) {
- par_override = 0;
- status = omap1_spi100k_setup_transfer(spi, NULL);
- }
+ status = omap1_spi100k_setup_transfer(spi, NULL);
if (cs_active)
omap1_spi100k_force_cs(spi100k, 0);
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 55576db31549..ce8dbdbce312 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -205,7 +205,7 @@ static void uwire_chipselect(struct spi_device *spi, int value)
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
unsigned len = t->len;
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned bytes;
u16 val, w;
int status = 0;
@@ -344,9 +344,10 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
/* assume it's already enabled */
rate = clk_get_rate(uwire->ck);
- hz = spi->max_speed_hz;
- if (t != NULL && t->speed_hz)
+ if (t != NULL)
hz = t->speed_hz;
+ else
+ hz = spi->max_speed_hz;
if (!hz) {
pr_debug("%s: zero speed?\n", dev_name(&spi->dev));
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3d09e0b69b73..1f8903d356e5 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1217,6 +1217,33 @@ out:
return status;
}
+static int omap2_mcspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+ /* Only a single channel can have the FORCE bit enabled
+ * in its chconf0 register.
+ * Scan all channels and clear the bit in all but the current one.
+ * A FORCE bit can remain set from a previous transfer that had
+ * cs_change enabled.
+ */
+ list_for_each_entry(cs, &ctx->cs, node) {
+ if (msg->spi->controller_state == cs)
+ continue;
+
+ if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
+ cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0,
+ cs->base + OMAP2_MCSPI_CHCONF0);
+ readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
+ }
+ }
+
+ return 0;
+}
+
static int omap2_mcspi_transfer_one(struct spi_master *master,
struct spi_device *spi, struct spi_transfer *t)
{
@@ -1344,6 +1371,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = omap2_mcspi_setup;
master->auto_runtime_pm = true;
+ master->prepare_message = omap2_mcspi_prepare_message;
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 54fb984a3e17..dd3d0a218d8b 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -210,12 +210,12 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
if (in_8(&hw->regs->cdm) != cdm)
out_8(&hw->regs->cdm, cdm);
- spin_lock(&hw->bitbang.lock);
+ mutex_lock(&hw->bitbang.lock);
if (!hw->bitbang.busy) {
hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
/* Need to ndelay here? */
}
- spin_unlock(&hw->bitbang.lock);
+ mutex_unlock(&hw->bitbang.lock);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 66a173939be8..bd8b369a343c 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -344,10 +344,6 @@ void pxa2xx_spi_dma_release(struct driver_data *drv_data)
}
}
-void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
-{
-}
-
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word, u32 *burst_code,
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index a8ef38ebb9c9..b25dc71b0ea9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -61,9 +62,13 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| QUARK_X1000_SSCR1_TFT \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
-#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
-#define SPI_CS_CONTROL_SW_MODE BIT(0)
-#define SPI_CS_CONTROL_CS_HIGH BIT(1)
+#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
+#define LPSS_CS_CONTROL_SW_MODE BIT(0)
+#define LPSS_CS_CONTROL_CS_HIGH BIT(1)
+#define LPSS_CS_CONTROL_CS_SEL_SHIFT 8
+#define LPSS_CS_CONTROL_CS_SEL_MASK (3 << LPSS_CS_CONTROL_CS_SEL_SHIFT)
+#define LPSS_CAPS_CS_EN_SHIFT 9
+#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
@@ -72,6 +77,7 @@ struct lpss_config {
int reg_general;
int reg_ssp;
int reg_cs_ctrl;
+ int reg_capabilities;
/* FIFO thresholds */
u32 rx_threshold;
u32 tx_threshold_lo;
@@ -85,6 +91,7 @@ static const struct lpss_config lpss_platforms[] = {
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
+ .reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
@@ -94,6 +101,7 @@ static const struct lpss_config lpss_platforms[] = {
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
+ .reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
@@ -103,10 +111,21 @@ static const struct lpss_config lpss_platforms[] = {
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
+ .reg_capabilities = 0xfc,
.rx_threshold = 1,
.tx_threshold_lo = 32,
.tx_threshold_hi = 56,
},
+ { /* LPSS_BXT_SSP */
+ .offset = 0x200,
+ .reg_general = -1,
+ .reg_ssp = 0x20,
+ .reg_cs_ctrl = 0x24,
+ .reg_capabilities = 0xfc,
+ .rx_threshold = 1,
+ .tx_threshold_lo = 16,
+ .tx_threshold_hi = 48,
+ },
};
static inline const struct lpss_config
@@ -121,6 +140,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_SPT_SSP:
+ case LPSS_BXT_SSP:
return true;
default:
return false;
@@ -249,7 +269,9 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
drv_data->lpss_base = drv_data->ioaddr + config->offset;
/* Enable software chip select control */
- value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
+ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+ value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
+ value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
@@ -259,7 +281,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
if (config->reg_general >= 0) {
value = __lpss_ssp_read_priv(drv_data,
config->reg_general);
- value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
__lpss_ssp_write_priv(drv_data,
config->reg_general, value);
}
@@ -269,15 +291,34 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
const struct lpss_config *config;
- u32 value;
+ u32 value, cs;
config = lpss_get_config(drv_data);
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- if (enable)
- value &= ~SPI_CS_CONTROL_CS_HIGH;
- else
- value |= SPI_CS_CONTROL_CS_HIGH;
+ if (enable) {
+ cs = drv_data->cur_msg->spi->chip_select;
+ cs <<= LPSS_CS_CONTROL_CS_SEL_SHIFT;
+ if (cs != (value & LPSS_CS_CONTROL_CS_SEL_MASK)) {
+ /*
+ * When switching to a different chip select output, the
+ * new output must be selected first, and 2 ssp_clk cycles
+ * must pass before its state is driven active. Otherwise
+ * a short glitch will occur on the previous chip select
+ * since the output select is latched but the state
+ * control is not.
+ */
+ value &= ~LPSS_CS_CONTROL_CS_SEL_MASK;
+ value |= cs;
+ __lpss_ssp_write_priv(drv_data,
+ config->reg_cs_ctrl, value);
+ ndelay(1000000000 /
+ (drv_data->master->max_speed_hz / 2));
+ }
+ value &= ~LPSS_CS_CONTROL_CS_HIGH;
+ } else {
+ value |= LPSS_CS_CONTROL_CS_HIGH;
+ }
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}
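The delay applied before activating the newly selected output is simply two ssp_clk periods expressed in nanoseconds: ndelay(1000000000 / (max_speed_hz / 2)). A quick worked example; the 100 MHz figure is assumed, not taken from any particular SoC:

/* worked example of the two-ssp_clk settling delay (illustrative) */
#include <stdio.h>

int main(void)
{
	unsigned long max_speed_hz = 100000000;	/* assumed ssp_clk rate */
	unsigned long delay_ns = 1000000000UL / (max_speed_hz / 2);

	/* one clock period is 10 ns at 100 MHz, so two periods come to 20 ns */
	printf("wait %lu ns before driving the new chip select active\n", delay_ns);
	return 0;
}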
@@ -734,7 +775,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
mul = (1 << 24) >> 1;
/* Calculate initial quot */
- q1 = DIV_ROUND_CLOSEST(fref1, rate);
+ q1 = DIV_ROUND_UP(fref1, rate);
/* Scale q1 if it's too big */
if (q1 > 256) {
@@ -759,7 +800,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
/* Case 2 */
- q2 = DIV_ROUND_CLOSEST(fref2, rate);
+ q2 = DIV_ROUND_UP(fref2, rate);
r2 = abs(fref2 / q2 - rate);
/*
@@ -778,13 +819,13 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
mul = (1 << 24) * 2 / 5;
}
- /* Check case 3 only If the divisor is big enough */
+ /* Check case 3 only if the divisor is big enough */
if (fref / rate >= 80) {
u64 fssp;
u32 m;
/* Calculate initial quot */
- q1 = DIV_ROUND_CLOSEST(fref, rate);
+ q1 = DIV_ROUND_UP(fref, rate);
m = (1 << 24) / q1;
/* Get the remainder */
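Replacing DIV_ROUND_CLOSEST with DIV_ROUND_UP in this function biases the divisor upward, which guarantees the resulting bit clock never exceeds the requested rate. A small numeric illustration; the 200 MHz reference is a made-up value, not the controller's actual fref:

/* why rounding the divisor up keeps the actual rate at or below the request */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)		(((n) + (d) / 2) / (d))

int main(void)
{
	unsigned long fref = 200000000;	/* assumed reference clock */
	unsigned long rate = 48000000;	/* requested SPI rate */

	unsigned long q_up = DIV_ROUND_UP(fref, rate);		/* 5 */
	unsigned long q_cl = DIV_ROUND_CLOSEST(fref, rate);	/* 4 */

	printf("round-up: /%lu -> %lu Hz\n", q_up, fref / q_up);
	printf("closest:  /%lu -> %lu Hz (exceeds the %lu Hz request)\n",
	       q_cl, fref / q_cl, rate);
	return 0;
}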
@@ -806,7 +847,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
- unsigned long ssp_clk = drv_data->max_clk_rate;
+ unsigned long ssp_clk = drv_data->master->max_speed_hz;
const struct ssp_device *ssp = drv_data->ssp;
rate = min_t(int, ssp_clk, rate);
@@ -818,8 +859,9 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
}
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
- struct chip_data *chip, int rate)
+ int rate)
{
+ struct chip_data *chip = drv_data->cur_chip;
unsigned int clk_div;
switch (drv_data->ssp_type) {
@@ -922,52 +964,55 @@ static void pump_transfers(unsigned long data)
drv_data->read = drv_data->rx ? chip->read : null_reader;
/* Change speed and bit per word on a per transfer */
- cr0 = chip->cr0;
- if (transfer->speed_hz || transfer->bits_per_word) {
-
- bits = chip->bits_per_word;
- speed = chip->speed_hz;
-
- if (transfer->speed_hz)
- speed = transfer->speed_hz;
-
- if (transfer->bits_per_word)
- bits = transfer->bits_per_word;
-
- clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);
-
- if (bits <= 8) {
- drv_data->n_bytes = 1;
- drv_data->read = drv_data->read != null_reader ?
- u8_reader : null_reader;
- drv_data->write = drv_data->write != null_writer ?
- u8_writer : null_writer;
- } else if (bits <= 16) {
- drv_data->n_bytes = 2;
- drv_data->read = drv_data->read != null_reader ?
- u16_reader : null_reader;
- drv_data->write = drv_data->write != null_writer ?
- u16_writer : null_writer;
- } else if (bits <= 32) {
- drv_data->n_bytes = 4;
- drv_data->read = drv_data->read != null_reader ?
- u32_reader : null_reader;
- drv_data->write = drv_data->write != null_writer ?
- u32_writer : null_writer;
- }
- /* if bits/word is changed in dma mode, then must check the
- * thresholds and burst also */
- if (chip->enable_dma) {
- if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
- message->spi,
- bits, &dma_burst,
- &dma_thresh))
- dev_warn_ratelimited(&message->spi->dev,
- "pump_transfers: DMA burst size reduced to match bits_per_word\n");
- }
-
- cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+ bits = transfer->bits_per_word;
+ speed = transfer->speed_hz;
+
+ clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);
+
+ if (bits <= 8) {
+ drv_data->n_bytes = 1;
+ drv_data->read = drv_data->read != null_reader ?
+ u8_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u8_writer : null_writer;
+ } else if (bits <= 16) {
+ drv_data->n_bytes = 2;
+ drv_data->read = drv_data->read != null_reader ?
+ u16_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u16_writer : null_writer;
+ } else if (bits <= 32) {
+ drv_data->n_bytes = 4;
+ drv_data->read = drv_data->read != null_reader ?
+ u32_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u32_writer : null_writer;
}
+ /*
+ * if bits/word is changed in dma mode, then the thresholds
+ * and burst size must also be checked
+ */
+ if (chip->enable_dma) {
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
+ message->spi,
+ bits, &dma_burst,
+ &dma_thresh))
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA burst size reduced to match bits_per_word\n");
+ }
+
+ /* NOTE: PXA25x_SSP _could_ use external clocking ... */
+ cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+ if (!pxa25x_ssp_comp(drv_data))
+ dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ drv_data->master->max_speed_hz
+ / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
+ chip->enable_dma ? "DMA" : "PIO");
+ else
+ dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ drv_data->master->max_speed_hz / 2
+ / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+ chip->enable_dma ? "DMA" : "PIO");
message->state = RUNNING_STATE;
@@ -1111,7 +1156,6 @@ static int setup(struct spi_device *spi)
struct chip_data *chip;
const struct lpss_config *config;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- unsigned int clk_div;
uint tx_thres, tx_hi_thres, rx_thres;
switch (drv_data->ssp_type) {
@@ -1123,6 +1167,7 @@ static int setup(struct spi_device *spi)
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_SPT_SSP:
+ case LPSS_BXT_SSP:
config = lpss_get_config(drv_data);
tx_thres = config->tx_threshold_lo;
tx_hi_thres = config->tx_threshold_hi;
@@ -1203,11 +1248,6 @@ static int setup(struct spi_device *spi)
}
}
- clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
- chip->speed_hz = spi->max_speed_hz;
-
- chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
- spi->bits_per_word);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
@@ -1228,18 +1268,6 @@ static int setup(struct spi_device *spi)
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
- /* NOTE: PXA25x_SSP _could_ use external clocking ... */
- if (!pxa25x_ssp_comp(drv_data))
- dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- drv_data->max_clk_rate
- / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
- chip->enable_dma ? "DMA" : "PIO");
- else
- dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- drv_data->max_clk_rate / 2
- / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
- chip->enable_dma ? "DMA" : "PIO");
-
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
chip->read = u8_reader;
@@ -1249,13 +1277,10 @@ static int setup(struct spi_device *spi)
chip->read = u16_reader;
chip->write = u16_writer;
} else if (spi->bits_per_word <= 32) {
- if (!is_quark_x1000_ssp(drv_data))
- chip->cr0 |= SSCR0_EDSS;
chip->n_bytes = 4;
chip->read = u32_reader;
chip->write = u32_writer;
}
- chip->bits_per_word = spi->bits_per_word;
spi_set_ctldata(spi, chip);
@@ -1279,6 +1304,7 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
+#ifdef CONFIG_PCI
#ifdef CONFIG_ACPI
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
@@ -1292,6 +1318,23 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+{
+ unsigned int devid;
+ int port_id = -1;
+
+ if (adev && adev->pnp.unique_id &&
+ !kstrtouint(adev->pnp.unique_id, 0, &devid))
+ port_id = devid;
+ return port_id;
+}
+#else /* !CONFIG_ACPI */
+static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+{
+ return -1;
+}
+#endif
+
/*
* PCI IDs of compound devices that integrate both host controller and private
* integrated DMA engine. Please note these are not used in module
@@ -1304,6 +1347,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
+ /* BXT */
+ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
+ /* APL */
+ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
{ },
};
@@ -1318,7 +1369,7 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
}
static struct pxa2xx_spi_master *
-pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_master *pdata;
struct acpi_device *adev;
@@ -1326,18 +1377,18 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
struct resource *res;
const struct acpi_device_id *adev_id = NULL;
const struct pci_device_id *pcidev_id = NULL;
- int devid, type;
+ int type;
- if (!ACPI_HANDLE(&pdev->dev) ||
- acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
- return NULL;
+ adev = ACPI_COMPANION(&pdev->dev);
if (dev_is_pci(pdev->dev.parent))
pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
to_pci_dev(pdev->dev.parent));
- else
+ else if (adev)
adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
+ else
+ return NULL;
if (adev_id)
type = (int)adev_id->driver_data;
@@ -1371,10 +1422,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
ssp->irq = platform_get_irq(pdev, 0);
ssp->type = type;
ssp->pdev = pdev;
-
- ssp->port_id = -1;
- if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
- ssp->port_id = devid;
+ ssp->port_id = pxa2xx_spi_get_port_id(adev);
pdata->num_chipselect = 1;
pdata->enable_dma = true;
@@ -1382,9 +1430,9 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
return pdata;
}
-#else
+#else /* !CONFIG_PCI */
static inline struct pxa2xx_spi_master *
-pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
return NULL;
}
@@ -1397,12 +1445,13 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct driver_data *drv_data;
struct ssp_device *ssp;
+ const struct lpss_config *config;
int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
if (!platform_info) {
- platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
+ platform_info = pxa2xx_spi_init_pdata(pdev);
if (!platform_info) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
@@ -1436,7 +1485,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bus_num = ssp->port_id;
- master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
master->cleanup = cleanup;
master->setup = setup;
@@ -1489,7 +1537,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Enable SOC clock */
clk_prepare_enable(ssp->clk);
- drv_data->max_clk_rate = clk_get_rate(ssp->clk);
+ master->max_speed_hz = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1522,6 +1570,19 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (is_lpss_ssp(drv_data))
lpss_ssp_setup(drv_data);
+ if (is_lpss_ssp(drv_data)) {
+ lpss_ssp_setup(drv_data);
+ config = lpss_get_config(drv_data);
+ if (config->reg_capabilities >= 0) {
+ tmp = __lpss_ssp_read_priv(drv_data,
+ config->reg_capabilities);
+ tmp &= LPSS_CAPS_CS_EN_MASK;
+ tmp >>= LPSS_CAPS_CS_EN_SHIFT;
+ platform_info->num_chipselect = ffz(tmp);
+ }
+ }
+ master->num_chipselect = platform_info->num_chipselect;
+
tasklet_init(&drv_data->pump_transfers, pump_transfers,
(unsigned long)drv_data);
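The chip-select count computed in the probe hunk above comes from the LPSS capabilities register: after masking and shifting, the enabled chip selects are assumed to form a contiguous run of low bits, so ffz() (index of the first zero bit) yields their count. A small stand-alone sketch of that arithmetic, with a hypothetical mask value and ffz_sketch() standing in for the kernel helper:

#include <stdio.h>

/* Stand-in for the kernel's ffz(): index of the first zero bit. */
static unsigned int ffz_sketch(unsigned long word)
{
	unsigned int i = 0;

	while (word & 1) {
		word >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long caps = 0x3;	/* hypothetical: chip selects 0 and 1 enabled */

	printf("num_chipselect = %u\n", ffz_sketch(caps));	/* prints 2 */
	return 0;
}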
@@ -1614,8 +1675,6 @@ static int pxa2xx_spi_resume(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- pxa2xx_spi_dma_resume(drv_data);
-
/* Enable the SSP clock */
if (!pm_runtime_suspended(dev))
clk_prepare_enable(ssp->clk);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 0a9b6390a817..58efa98313aa 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -46,9 +46,6 @@ struct driver_data {
u32 clear_sr;
u32 mask_sr;
- /* Maximun clock rate */
- unsigned long max_clk_rate;
-
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
@@ -86,10 +83,8 @@ struct driver_data {
};
struct chip_data {
- u32 cr0;
u32 cr1;
u32 dds_rate;
- u32 psp;
u32 timeout;
u8 n_bytes;
u32 dma_burst_size;
@@ -98,8 +93,6 @@ struct chip_data {
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
u8 enable_dma;
- u8 bits_per_word;
- u32 speed_hz;
union {
int gpio_cs;
unsigned int frm;
@@ -175,7 +168,6 @@ extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
-extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word,
@@ -196,7 +188,6 @@ static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
return 0;
}
static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
-static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word,
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index f36bc320a807..4e7d1bfed7e6 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -198,12 +198,12 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
if (ret)
return ret;
- spin_lock(&hw->bitbang.lock);
+ mutex_lock(&hw->bitbang.lock);
if (!hw->bitbang.busy) {
hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
/* need to ndelay for 0.5 clocktick ? */
}
- spin_unlock(&hw->bitbang.lock);
+ mutex_unlock(&hw->bitbang.lock);
return 0;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index cd1cfac0447f..8e86e7f6663a 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -32,6 +32,7 @@
#define MAX_SPI_PORTS 6
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
+#define AUTOSUSPEND_TIMEOUT 2000
/* Registers and bit-fields */
@@ -682,7 +683,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word;
- speed = xfer->speed_hz ? : spi->max_speed_hz;
+ speed = xfer->speed_hz;
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
@@ -859,13 +860,15 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
}
- pm_runtime_put(&sdd->pdev->dev);
+ pm_runtime_mark_last_busy(&sdd->pdev->dev);
+ pm_runtime_put_autosuspend(&sdd->pdev->dev);
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
return 0;
setup_exit:
- pm_runtime_put(&sdd->pdev->dev);
+ pm_runtime_mark_last_busy(&sdd->pdev->dev);
+ pm_runtime_put_autosuspend(&sdd->pdev->dev);
/* setup() returns with device de-selected */
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
@@ -1162,6 +1165,12 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
goto err2;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
/* Setup Default Mode */
s3c64xx_spi_hwinit(sdd, sdd->port_id);
@@ -1180,9 +1189,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ret = devm_spi_register_master(&pdev->dev, master);
if (ret != 0) {
dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
@@ -1195,9 +1201,16 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err3:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
clk_disable_unprepare(sdd->src_clk);
err2:
clk_disable_unprepare(sdd->clk);
@@ -1212,7 +1225,7 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
@@ -1220,6 +1233,10 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(sdd->clk);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
return 0;
}
@@ -1233,10 +1250,9 @@ static int s3c64xx_spi_suspend(struct device *dev)
if (ret)
return ret;
- if (!pm_runtime_suspended(dev)) {
- clk_disable_unprepare(sdd->clk);
- clk_disable_unprepare(sdd->src_clk);
- }
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1248,14 +1264,14 @@ static int s3c64xx_spi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
+ int ret;
if (sci->cfg_gpio)
sci->cfg_gpio();
- if (!pm_runtime_suspended(dev)) {
- clk_prepare_enable(sdd->src_clk);
- clk_prepare_enable(sdd->clk);
- }
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
s3c64xx_spi_hwinit(sdd, sdd->port_id);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index aa6d284131e0..64318fcfacf2 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -39,8 +39,6 @@ struct ti_qspi_regs {
};
struct ti_qspi {
- struct completion transfer_complete;
-
/* list synchronization */
struct mutex list_lock;
@@ -62,10 +60,6 @@ struct ti_qspi {
#define QSPI_PID (0x0)
#define QSPI_SYSCONFIG (0x10)
-#define QSPI_INTR_STATUS_RAW_SET (0x20)
-#define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24)
-#define QSPI_INTR_ENABLE_SET_REG (0x28)
-#define QSPI_INTR_ENABLE_CLEAR_REG (0x2c)
#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
#define QSPI_SPI_DC_REG (0x44)
#define QSPI_SPI_CMD_REG (0x48)
@@ -97,7 +91,6 @@ struct ti_qspi {
#define QSPI_RD_DUAL (3 << 16)
#define QSPI_RD_QUAD (7 << 16)
#define QSPI_INVAL (4 << 16)
-#define QSPI_WC_CMD_INT_EN (1 << 14)
#define QSPI_FLEN(n) ((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS 128
#define QSPI_WLEN_MAX_BYTES 16
@@ -106,10 +99,6 @@ struct ti_qspi {
#define BUSY 0x01
#define WC 0x02
-/* INTERRUPT REGISTER */
-#define QSPI_WC_INT_EN (1 << 1)
-#define QSPI_WC_INT_DISABLE (1 << 1)
-
/* Device Control */
#define QSPI_DD(m, n) (m << (3 + n * 8))
#define QSPI_CKPHA(n) (1 << (2 + n * 8))
@@ -217,6 +206,24 @@ static inline u32 qspi_is_busy(struct ti_qspi *qspi)
return stat & BUSY;
}
+static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+ do {
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ if (stat & WC)
+ return 0;
+ cpu_relax();
+ } while (time_after(timeout, jiffies));
+
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ if (stat & WC)
+ return 0;
+ return -ETIMEDOUT;
+}
+
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
int wlen, count, xfer_len;
@@ -275,8 +282,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
}
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- if (!wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT)) {
+ if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
@@ -315,8 +321,7 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return -EBUSY;
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- if (!wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT)) {
+ if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
@@ -388,9 +393,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
qspi->cmd = 0;
qspi->cmd |= QSPI_EN_CS(spi->chip_select);
qspi->cmd |= QSPI_FLEN(frame_length);
- qspi->cmd |= QSPI_WC_CMD_INT_EN;
- ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
mutex_lock(&qspi->list_lock);
@@ -410,39 +413,13 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
mutex_unlock(&qspi->list_lock);
+ ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
m->status = status;
spi_finalize_current_message(master);
- ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
-
return status;
}
-static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
-{
- struct ti_qspi *qspi = dev_id;
- u16 int_stat;
- u32 stat;
-
- irqreturn_t ret = IRQ_HANDLED;
-
- int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
- stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
-
- if (!int_stat) {
- dev_dbg(qspi->dev, "No IRQ triggered\n");
- ret = IRQ_NONE;
- goto out;
- }
-
- ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
- QSPI_INTR_STATUS_ENABLED_CLEAR);
- if (stat & WC)
- complete(&qspi->transfer_complete);
-out:
- return ret;
-}
-
static int ti_qspi_runtime_resume(struct device *dev)
{
struct ti_qspi *qspi;
@@ -551,22 +528,12 @@ static int ti_qspi_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
- dev_name(&pdev->dev), qspi);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
- irq);
- goto free_master;
- }
-
qspi->fclk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(qspi->fclk)) {
ret = PTR_ERR(qspi->fclk);
dev_err(&pdev->dev, "could not get clk: %d\n", ret);
}
- init_completion(&qspi->transfer_complete);
-
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
@@ -587,18 +554,7 @@ free_master:
static int ti_qspi_remove(struct platform_device *pdev)
{
- struct ti_qspi *qspi = platform_get_drvdata(pdev);
- int ret;
-
- ret = pm_runtime_get_sync(qspi->dev);
- if (ret < 0) {
- dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
- return ret;
- }
-
- ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
-
- pm_runtime_put(qspi->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index daf5aa1c24c3..c6ae775289e5 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -307,7 +307,6 @@ static int tle62x0_remove(struct spi_device *spi)
static struct spi_driver tle62x0_driver = {
.driver = {
.name = "tle62x0",
- .owner = THIS_MODULE,
},
.probe = tle62x0_probe,
.remove = tle62x0_remove,
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 9190124b6d90..d69f8f8f3fa6 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -181,7 +181,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
u32 data;
unsigned int len = t->len;
unsigned int wsize;
- u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+ u32 speed_hz = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
wsize = bits_per_word >> 3; /* in bytes */
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index a339c1e9997a..3009121173cd 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -270,6 +270,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
while (remaining_words) {
int n_words, tx_words, rx_words;
+ u32 sr;
n_words = min(remaining_words, xspi->buffer_size);
@@ -284,24 +285,33 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
if (use_irq) {
xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
wait_for_completion(&xspi->done);
- } else
- while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
- XSPI_SR_TX_EMPTY_MASK))
- ;
-
- /* A transmit has just completed. Process received data and
- * check for more data to transmit. Always inhibit the
- * transmitter while the Isr refills the transmit register/FIFO,
- * or make sure it is stopped if we're done.
- */
- if (use_irq)
+ /* A transmit has just completed. Process received data
+ * and check for more data to transmit. Always inhibit
+ * the transmitter while the Isr refills the transmit
+ * register/FIFO, or make sure it is stopped if we're
+ * done.
+ */
xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
+ xspi->regs + XSPI_CR_OFFSET);
+ sr = XSPI_SR_TX_EMPTY_MASK;
+ } else
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
- while (rx_words--)
- xilinx_spi_rx(xspi);
+ while (rx_words) {
+ if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
+ xilinx_spi_rx(xspi);
+ rx_words--;
+ continue;
+ }
+
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
+ xilinx_spi_rx(xspi);
+ rx_words--;
+ }
+ }
remaining_words -= n_words;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a5f53de813d3..e2415be209d5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -123,6 +123,28 @@ SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
+ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
+ "transfer_bytes_histo_" number, \
+ transfer_bytes_histo[index], "%lu")
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
+
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
@@ -143,6 +165,23 @@ static struct attribute *spi_device_statistics_attrs[] = {
&dev_attr_spi_device_bytes.attr,
&dev_attr_spi_device_bytes_rx.attr,
&dev_attr_spi_device_bytes_tx.attr,
+ &dev_attr_spi_device_transfer_bytes_histo0.attr,
+ &dev_attr_spi_device_transfer_bytes_histo1.attr,
+ &dev_attr_spi_device_transfer_bytes_histo2.attr,
+ &dev_attr_spi_device_transfer_bytes_histo3.attr,
+ &dev_attr_spi_device_transfer_bytes_histo4.attr,
+ &dev_attr_spi_device_transfer_bytes_histo5.attr,
+ &dev_attr_spi_device_transfer_bytes_histo6.attr,
+ &dev_attr_spi_device_transfer_bytes_histo7.attr,
+ &dev_attr_spi_device_transfer_bytes_histo8.attr,
+ &dev_attr_spi_device_transfer_bytes_histo9.attr,
+ &dev_attr_spi_device_transfer_bytes_histo10.attr,
+ &dev_attr_spi_device_transfer_bytes_histo11.attr,
+ &dev_attr_spi_device_transfer_bytes_histo12.attr,
+ &dev_attr_spi_device_transfer_bytes_histo13.attr,
+ &dev_attr_spi_device_transfer_bytes_histo14.attr,
+ &dev_attr_spi_device_transfer_bytes_histo15.attr,
+ &dev_attr_spi_device_transfer_bytes_histo16.attr,
NULL,
};
@@ -168,6 +207,23 @@ static struct attribute *spi_master_statistics_attrs[] = {
&dev_attr_spi_master_bytes.attr,
&dev_attr_spi_master_bytes_rx.attr,
&dev_attr_spi_master_bytes_tx.attr,
+ &dev_attr_spi_master_transfer_bytes_histo0.attr,
+ &dev_attr_spi_master_transfer_bytes_histo1.attr,
+ &dev_attr_spi_master_transfer_bytes_histo2.attr,
+ &dev_attr_spi_master_transfer_bytes_histo3.attr,
+ &dev_attr_spi_master_transfer_bytes_histo4.attr,
+ &dev_attr_spi_master_transfer_bytes_histo5.attr,
+ &dev_attr_spi_master_transfer_bytes_histo6.attr,
+ &dev_attr_spi_master_transfer_bytes_histo7.attr,
+ &dev_attr_spi_master_transfer_bytes_histo8.attr,
+ &dev_attr_spi_master_transfer_bytes_histo9.attr,
+ &dev_attr_spi_master_transfer_bytes_histo10.attr,
+ &dev_attr_spi_master_transfer_bytes_histo11.attr,
+ &dev_attr_spi_master_transfer_bytes_histo12.attr,
+ &dev_attr_spi_master_transfer_bytes_histo13.attr,
+ &dev_attr_spi_master_transfer_bytes_histo14.attr,
+ &dev_attr_spi_master_transfer_bytes_histo15.attr,
+ &dev_attr_spi_master_transfer_bytes_histo16.attr,
NULL,
};
@@ -186,10 +242,15 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
struct spi_master *master)
{
unsigned long flags;
+ int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
+
+ if (l2len < 0)
+ l2len = 0;
spin_lock_irqsave(&stats->lock, flags);
stats->transfers++;
+ stats->transfer_bytes_histo[l2len]++;
stats->bytes += xfer->len;
if ((xfer->tx_buf) &&
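The new transfer_bytes_histo_* attributes above bucket each transfer by length: l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1, clamped to zero, so lengths 0-1 land in bucket 0, 2-3 in bucket 1, 4-7 in bucket 2, and anything of 64 KiB or more in the final "65536+" bucket. A user-space sketch of the same mapping; fls_sketch() mimics the kernel's fls(), which returns the 1-based index of the most significant set bit and 0 for 0:

#include <stdio.h>

#define SPI_STATISTICS_HISTO_SIZE 17	/* buckets "0-1" ... "65536+" */

static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int lengths[] = { 0, 1, 3, 16, 100, 4096, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		int l2len = fls_sketch(lengths[i]);

		if (l2len > SPI_STATISTICS_HISTO_SIZE)
			l2len = SPI_STATISTICS_HISTO_SIZE;
		l2len--;
		if (l2len < 0)
			l2len = 0;
		printf("len=%6u -> histo bucket %d\n", lengths[i], l2len);
	}
	return 0;
}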
@@ -270,15 +331,24 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
+ if (dev->of_node) {
+ spi->irq = of_irq_get(dev->of_node, 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (spi->irq < 0)
+ spi->irq = 0;
+ }
+
ret = dev_pm_domain_attach(dev, true);
if (ret != -EPROBE_DEFER) {
- ret = sdrv->probe(to_spi_device(dev));
+ ret = sdrv->probe(spi);
if (ret)
dev_pm_domain_detach(dev, true);
}
@@ -305,12 +375,15 @@ static void spi_drv_shutdown(struct device *dev)
}
/**
- * spi_register_driver - register a SPI driver
+ * __spi_register_driver - register a SPI driver
* @sdrv: the driver to register
* Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
*/
-int spi_register_driver(struct spi_driver *sdrv)
+int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
+ sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
if (sdrv->probe)
sdrv->driver.probe = spi_drv_probe;
@@ -320,7 +393,7 @@ int spi_register_driver(struct spi_driver *sdrv)
sdrv->driver.shutdown = spi_drv_shutdown;
return driver_register(&sdrv->driver);
}
-EXPORT_SYMBOL_GPL(spi_register_driver);
+EXPORT_SYMBOL_GPL(__spi_register_driver);
/*-------------------------------------------------------------------------*/
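With the driver core now recording the owning module in __spi_register_driver(), individual SPI drivers no longer need to set .driver.owner themselves, which is why so many .owner lines are removed later in this diff; presumably a spi_register_driver() wrapper macro supplies THIS_MODULE. A minimal sketch of a driver written against the new convention, with all foo_* names hypothetical:

#include <linux/module.h>
#include <linux/spi/spi.h>

static int foo_probe(struct spi_device *spi)
{
	dev_info(&spi->dev, "foo device bound\n");
	return 0;
}

static int foo_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* no .owner = THIS_MODULE needed any more */
	},
	.probe = foo_probe,
	.remove = foo_remove,
};
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");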
@@ -359,7 +432,7 @@ static DEFINE_MUTEX(board_lock);
* needs to discard the spi_device without adding it, then it should
* call spi_dev_put() on it.
*
- * Returns a pointer to the new device, or NULL.
+ * Return: a pointer to the new device, or NULL.
*/
struct spi_device *spi_alloc_device(struct spi_master *master)
{
@@ -418,7 +491,7 @@ static int spi_dev_check(struct device *dev, void *data)
* Companion function to spi_alloc_device. Devices allocated with
* spi_alloc_device can be added onto the spi bus with this function.
*
- * Returns 0 on success; negative errno on failure
+ * Return: 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
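The kernel-doc above describes the two-step instantiation pattern: allocate a child with spi_alloc_device(), fill in its identity, then either add it with spi_add_device() or discard it with spi_dev_put(). A hedged sketch of that pattern, where the "foo" modalias and the chip-select/speed/mode values are placeholders:

#include <linux/spi/spi.h>
#include <linux/string.h>

static int foo_instantiate(struct spi_master *master)
{
	struct spi_device *spi;

	spi = spi_alloc_device(master);
	if (!spi)
		return -ENOMEM;

	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;

	if (spi_add_device(spi)) {
		spi_dev_put(spi);	/* discard without adding */
		return -ENODEV;
	}
	return 0;
}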
@@ -491,7 +564,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
* this is exported so that for example a USB or parport based adapter
* driver could add devices (which it would learn about out-of-band).
*
- * Returns the new device, or NULL.
+ * Return: the new device, or NULL.
*/
struct spi_device *spi_new_device(struct spi_master *master,
struct spi_board_info *chip)
@@ -563,6 +636,8 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
*
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
@@ -597,7 +672,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi->cs_gpio >= 0)
+ if (gpio_is_valid(spi->cs_gpio))
gpio_set_value(spi->cs_gpio, !enable);
else if (spi->master->set_cs)
spi->master->set_cs(spi, !enable);
@@ -1140,6 +1215,8 @@ static int spi_init_queue(struct spi_master *master)
*
* If there are more messages in the queue, the next message is returned from
* this call.
+ *
+ * Return: the next message in the queue, else NULL if the queue is empty.
*/
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
@@ -1303,6 +1380,8 @@ static int __spi_queued_transfer(struct spi_device *spi,
* spi_queued_transfer - transfer function for queued transfers
* @spi: spi device which is requesting transfer
* @msg: spi message to be handled, queued to the driver queue
+ *
+ * Return: zero on success, else a negative error code.
*/
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
@@ -1433,9 +1512,6 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
}
spi->max_speed_hz = value;
- /* IRQ */
- spi->irq = irq_of_parse_and_map(nc, 0);
-
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
@@ -1605,12 +1681,13 @@ static struct class spi_master_class = {
* only ones directly touching chip registers. It's how they allocate
* an spi_master structure, prior to calling spi_register_master().
*
- * This must be called from context that can sleep. It returns the SPI
- * master structure on success, else NULL.
+ * This must be called from context that can sleep.
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
* adding the device) calling spi_master_put() to prevent a memory leak.
+ *
+ * Return: the SPI master structure on success, else NULL.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
@@ -1694,6 +1771,8 @@ static int of_spi_register_master(struct spi_master *master)
* success, else a negative error code (dropping the master's refcount).
* After a successful return, the caller is responsible for calling
* spi_unregister_master().
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_register_master(struct spi_master *master)
{
@@ -1787,6 +1866,8 @@ static void devm_spi_unregister(struct device *dev, void *res)
*
* Register a SPI device as with spi_register_master() which will
* automatically be unregistered
+ *
+ * Return: zero on success, else a negative error code.
*/
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
@@ -1892,6 +1973,8 @@ static int __spi_master_match(struct device *dev, const void *data)
* arch init time. It returns a refcounted pointer to the relevant
* spi_master (which the caller must release), or NULL if there is
* no such master registered.
+ *
+ * Return: the SPI master structure on success, else NULL.
*/
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
@@ -1945,11 +2028,13 @@ static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_w
* that the underlying controller or its driver does not support. For
* example, not all hardware supports wire transfers using nine bit words,
* LSB-first wire encoding, or active-high chipselects.
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits, ugly_bits;
- int status = 0;
+ int status;
/* check mode to prevent that DUAL and QUAD set at the same time
*/
@@ -1986,17 +2071,18 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
- return -EINVAL;
+ status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
+ if (status)
+ return status;
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->master->max_speed_hz;
- spi_set_cs(spi, false);
-
if (spi->master->setup)
status = spi->master->setup(spi);
+ spi_set_cs(spi, false);
+
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
@@ -2162,6 +2248,8 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_async(struct spi_device *spi, struct spi_message *message)
{
@@ -2214,6 +2302,8 @@ EXPORT_SYMBOL_GPL(spi_async);
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
@@ -2329,7 +2419,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
* Also, the caller is guaranteeing that the memory associated with the
* message will not be freed before this call returns.
*
- * It returns zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
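As the comment above notes, spi_sync() callers own the message memory for the duration of the call, so stack-allocated transfers are fine. A minimal sketch of a full-duplex synchronous transfer on a hypothetical device:

#include <linux/spi/spi.h>

static int foo_xfer(struct spi_device *spi, const u8 *tx, u8 *rx, size_t len)
{
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* message memory stays valid until return */
}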
@@ -2351,7 +2441,7 @@ EXPORT_SYMBOL_GPL(spi_sync);
* SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
* be released by a spi_bus_unlock call when the exclusive access is over.
*
- * It returns zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
@@ -2372,7 +2462,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
* exclusive access is over. Data transfer must be done by spi_sync_locked
* and spi_async_locked calls when the SPI bus lock is held.
*
- * It returns zero on success, else a negative error code.
+ * Return: always zero.
*/
int spi_bus_lock(struct spi_master *master)
{
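Per the kernel-doc above, once spi_bus_lock() is taken, only the *_locked transfer calls may be used until spi_bus_unlock(). A sketch of grouping two messages so no other client's traffic can be interleaved; foo_atomic_pair() and the message pointers are hypothetical:

static int foo_atomic_pair(struct spi_device *spi,
			   struct spi_message *first,
			   struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);		/* always returns 0 */
	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);
	spi_bus_unlock(master);		/* always returns 0 */
	return ret;
}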
@@ -2401,7 +2491,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
* This call releases an SPI bus lock previously obtained by an spi_bus_lock
* call.
*
- * It returns zero on success, else a negative error code.
+ * Return: always zero.
*/
int spi_bus_unlock(struct spi_master *master)
{
@@ -2436,6 +2526,8 @@ static u8 *buf;
* portable code should never use this for more than 32 bytes.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with dma-safe buffers.
+ *
+ * Return: zero on success, else a negative error code.
*/
int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
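The helper documented above copies through an internal buffer, so it suits short register-style accesses only; anything larger or performance-critical should use spi_sync()/spi_async() with DMA-safe buffers. A hedged usage sketch, with the read-command encoding purely hypothetical:

static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* hypothetical "read" bit */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}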
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index ef008e52f953..91a0fcd72423 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -788,7 +788,6 @@ static int spidev_remove(struct spi_device *spi)
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spidev_dt_ids),
},
.probe = spidev_probe,
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 20e69f0b5cb0..3ccdec94fee7 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -327,7 +327,6 @@ MODULE_DEVICE_TABLE(of, dt_ids); \
static struct spi_driver fbtft_driver_spi_driver = { \
.driver = { \
.name = _name, \
- .owner = THIS_MODULE, \
.of_match_table = of_match_ptr(dt_ids), \
}, \
.probe = fbtft_driver_probe_spi, \
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index 704b78c78f13..ce0d254148e4 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -573,7 +573,6 @@ static int flexfb_remove_pdev(struct platform_device *pdev)
static struct spi_driver flexfb_spi_driver = {
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
},
.probe = flexfb_probe_spi,
.remove = flexfb_remove_spi,
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 10db685813c9..06c0b75ed26a 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -235,7 +235,6 @@ static int adis16201_remove(struct spi_device *spi)
static struct spi_driver adis16201_driver = {
.driver = {
.name = "adis16201",
- .owner = THIS_MODULE,
},
.probe = adis16201_probe,
.remove = adis16201_remove,
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index fb593d23d5bc..de5b84ac842b 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -203,7 +203,6 @@ static int adis16203_remove(struct spi_device *spi)
static struct spi_driver adis16203_driver = {
.driver = {
.name = "adis16203",
- .owner = THIS_MODULE,
},
.probe = adis16203_probe,
.remove = adis16203_remove,
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index ea0ac2467ac2..20a9df64f1ed 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -241,7 +241,6 @@ static int adis16204_remove(struct spi_device *spi)
static struct spi_driver adis16204_driver = {
.driver = {
.name = "adis16204",
- .owner = THIS_MODULE,
},
.probe = adis16204_probe,
.remove = adis16204_remove,
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index d1dc1a3cb3ce..8b42bf8c3f60 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -235,7 +235,6 @@ static int adis16209_remove(struct spi_device *spi)
static struct spi_driver adis16209_driver = {
.driver = {
.name = "adis16209",
- .owner = THIS_MODULE,
},
.probe = adis16209_probe,
.remove = adis16209_remove,
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index e46a91c69a31..d0165218b60c 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -482,7 +482,6 @@ static int adis16220_remove(struct spi_device *spi)
static struct spi_driver adis16220_driver = {
.driver = {
.name = "adis16220",
- .owner = THIS_MODULE,
},
.probe = adis16220_probe,
.remove = adis16220_remove,
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index cb074e864408..1b5b685a8691 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -288,7 +288,6 @@ static int adis16240_remove(struct spi_device *spi)
static struct spi_driver adis16240_driver = {
.driver = {
.name = "adis16240",
- .owner = THIS_MODULE,
},
.probe = adis16240_probe,
.remove = adis16240_remove,
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 7a1939a66c93..7939ae6378d7 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -802,7 +802,6 @@ static int lis3l02dq_remove(struct spi_device *spi)
static struct spi_driver lis3l02dq_driver = {
.driver = {
.name = "lis3l02dq",
- .owner = THIS_MODULE,
},
.probe = lis3l02dq_probe,
.remove = lis3l02dq_remove,
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 03cb22508a5d..02e930c55570 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -1197,7 +1197,6 @@ MODULE_DEVICE_TABLE(spi, sca3000_id);
static struct spi_driver sca3000_driver = {
.driver = {
.name = "sca3000",
- .owner = THIS_MODULE,
},
.probe = sca3000_probe,
.remove = sca3000_remove,
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 5b87049cd3f9..bb40f3728742 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -707,7 +707,6 @@ MODULE_DEVICE_TABLE(spi, ad7192_id);
static struct spi_driver ad7192_driver = {
.driver = {
.name = "ad7192",
- .owner = THIS_MODULE,
},
.probe = ad7192_probe,
.remove = ad7192_remove,
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 45df714cc83a..35acb1a4669b 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -974,7 +974,6 @@ MODULE_DEVICE_TABLE(spi, ad7280_id);
static struct spi_driver ad7280_driver = {
.driver = {
.name = "ad7280",
- .owner = THIS_MODULE,
},
.probe = ad7280_probe,
.remove = ad7280_remove,
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index b88f8825797d..cbb36317200e 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -103,7 +103,6 @@ MODULE_DEVICE_TABLE(spi, ad7606_id);
static struct spi_driver ad7606_driver = {
.driver = {
.name = "ad7606",
- .owner = THIS_MODULE,
.pm = AD7606_SPI_PM_OPS,
},
.probe = ad7606_spi_probe,
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 618b41faa289..3abc7789237f 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -264,7 +264,6 @@ MODULE_DEVICE_TABLE(spi, ad7780_id);
static struct spi_driver ad7780_driver = {
.driver = {
.name = "ad7780",
- .owner = THIS_MODULE,
},
.probe = ad7780_probe,
.remove = ad7780_remove,
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index ccec57c5f70d..c8e156646528 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -432,7 +432,6 @@ MODULE_DEVICE_TABLE(spi, ad7816_id);
static struct spi_driver ad7816_driver = {
.driver = {
.name = "ad7816",
- .owner = THIS_MODULE,
},
.probe = ad7816_probe,
.id_table = ad7816_id,
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index e480abb72e4a..5cd22743e140 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -132,7 +132,6 @@ static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
.pm = ADT7316_PM_OPS,
- .owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
.id_table = adt7316_spi_id,
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index a861fe0149b1..2b65faa6296a 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -339,7 +339,6 @@ MODULE_DEVICE_TABLE(spi, ad9832_id);
static struct spi_driver ad9832_driver = {
.driver = {
.name = "ad9832",
- .owner = THIS_MODULE,
},
.probe = ad9832_probe,
.remove = ad9832_remove,
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index fcffe2c11685..6464f2cbe94b 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -446,7 +446,6 @@ MODULE_DEVICE_TABLE(spi, ad9834_id);
static struct spi_driver ad9834_driver = {
.driver = {
.name = "ad9834",
- .owner = THIS_MODULE,
},
.probe = ad9834_probe,
.remove = ad9834_remove,
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index 981b63f83a7b..ab816a215eb8 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -208,7 +208,6 @@ static int adis16060_w_remove(struct spi_device *spi)
static struct spi_driver adis16060_r_driver = {
.driver = {
.name = "adis16060_r",
- .owner = THIS_MODULE,
},
.probe = adis16060_r_probe,
};
@@ -216,7 +215,6 @@ static struct spi_driver adis16060_r_driver = {
static struct spi_driver adis16060_w_driver = {
.driver = {
.name = "adis16060_w",
- .owner = THIS_MODULE,
},
.probe = adis16060_w_probe,
.remove = adis16060_w_remove,
diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/staging/iio/magnetometer/hmc5843_spi.c
index 1549192c0dec..8be198058ea2 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/staging/iio/magnetometer/hmc5843_spi.c
@@ -88,7 +88,6 @@ static struct spi_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
.pm = HMC5843_PM_OPS,
- .owner = THIS_MODULE,
},
.id_table = hmc5843_id,
.probe = hmc5843_spi_probe,
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 188830d7e257..f129039bece3 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -534,7 +534,6 @@ static int ade7753_remove(struct spi_device *spi)
static struct spi_driver ade7753_driver = {
.driver = {
.name = "ade7753",
- .owner = THIS_MODULE,
},
.probe = ade7753_probe,
.remove = ade7753_remove,
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 664c6e5f76b1..1e950685e12f 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -575,7 +575,6 @@ static int ade7754_remove(struct spi_device *spi)
static struct spi_driver ade7754_driver = {
.driver = {
.name = "ade7754",
- .owner = THIS_MODULE,
},
.probe = ade7754_probe,
.remove = ade7754_remove,
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 38838085824f..0db23e4d1852 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -904,7 +904,6 @@ MODULE_DEVICE_TABLE(spi, ade7758_id);
static struct spi_driver ade7758_driver = {
.driver = {
.name = "ade7758",
- .owner = THIS_MODULE,
},
.probe = ade7758_probe,
.remove = ade7758_remove,
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 11c1edcc1ed6..684e612a88b9 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -490,7 +490,6 @@ static int ade7759_remove(struct spi_device *spi)
static struct spi_driver ade7759_driver = {
.driver = {
.name = "ade7759",
- .owner = THIS_MODULE,
},
.probe = ade7759_probe,
.remove = ade7759_remove,
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index da774866512c..2413052c5bfb 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -314,7 +314,6 @@ MODULE_DEVICE_TABLE(spi, ade7854_id);
static struct spi_driver ade7854_driver = {
.driver = {
.name = "ade7854",
- .owner = THIS_MODULE,
},
.probe = ade7854_spi_probe,
.remove = ade7854_spi_remove,
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index c17893b4918c..595e711d35a6 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -155,7 +155,6 @@ MODULE_DEVICE_TABLE(spi, ad2s1200_id);
static struct spi_driver ad2s1200_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ad2s1200_probe,
.id_table = ad2s1200_id,
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 3bd65f5c9cf5..d97aa2827412 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -735,7 +735,6 @@ MODULE_DEVICE_TABLE(spi, ad2s1210_id);
static struct spi_driver ad2s1210_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ad2s1210_probe,
.remove = ad2s1210_remove,
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index c57a29616223..5b1c0db33e7f 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -100,7 +100,6 @@ MODULE_DEVICE_TABLE(spi, ad2s90_id);
static struct spi_driver ad2s90_driver = {
.driver = {
.name = "ad2s90",
- .owner = THIS_MODULE,
},
.probe = ad2s90_probe,
.id_table = ad2s90_id,
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 11bf2c60c31a..02f27593013e 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -2750,13 +2750,9 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
op_data, &lockh, &flock, 0, NULL /* req */, flags);
- if ((file_lock->fl_flags & FL_FLOCK) &&
- (rc == 0 || file_lock->fl_type == F_UNLCK))
- rc2 = flock_lock_file_wait(file, file_lock);
- if ((file_lock->fl_flags & FL_POSIX) &&
- (rc == 0 || file_lock->fl_type == F_UNLCK) &&
+ if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
!(flags & LDLM_FL_TEST_LOCK))
- rc2 = posix_lock_file_wait(file, file_lock);
+ rc2 = locks_lock_file_wait(file, file_lock);
if (rc2 && file_lock->fl_type != F_UNLCK) {
einfo.ei_mode = LCK_NL;
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index fb55e5941445..b10d6016b993 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -613,7 +613,7 @@ static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency)
static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev)
{
int err;
- u8 lsb, msb;
+ u8 lsb = 0, msb = 0;
mutex_lock(&bdev->mutex);
@@ -658,7 +658,7 @@ static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev,
static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev)
{
int err;
- u8 lsb, msb;
+ u8 lsb = 0, msb = 0;
mutex_lock(&bdev->mutex);
@@ -1052,7 +1052,7 @@ static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask)
static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev)
{
int err;
- u8 lsb, msb;
+ u8 lsb = 0, msb = 0;
mutex_lock(&bdev->mutex);
@@ -1088,7 +1088,7 @@ static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev,
static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev)
{
int err;
- u8 lsb, msb;
+ u8 lsb = 0, msb = 0;
mutex_lock(&bdev->mutex);
@@ -1123,7 +1123,7 @@ static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask)
static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev)
{
int err;
- u8 lsb, msb;
+ u8 lsb = 0, msb = 0;
mutex_lock(&bdev->mutex);
@@ -1158,7 +1158,7 @@ static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match)
static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev)
{
int err;
- u8 lsb, msb;
+ u8 lsb = 0, msb = 0;
mutex_lock(&bdev->mutex);
@@ -1193,7 +1193,7 @@ static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask)
static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
{
int err;
- u8 value0, value1;
+ u8 value0 = 0, value1 = 0;
mutex_lock(&bdev->mutex);
@@ -1211,7 +1211,7 @@ static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev)
{
int err;
- u8 value0, value1;
+ u8 value0 = 0, value1 = 0;
mutex_lock(&bdev->mutex);
@@ -1239,7 +1239,7 @@ static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev)
static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode)
{
int err;
- u8 value;
+ u8 value = 0;
mutex_lock(&bdev->mutex);
@@ -1913,7 +1913,7 @@ unlock:
static void bcm2048_work(struct work_struct *work)
{
struct bcm2048_device *bdev;
- u8 flag_lsb, flag_msb, flags;
+ u8 flag_lsb = 0, flag_msb = 0, flags;
bdev = container_of(work, struct bcm2048_device, work);
bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 87048a14c34d..0fdff91624fd 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -428,8 +428,8 @@ vpfe_video_get_next_buffer(struct vpfe_video_device *video)
struct vpfe_cap_buffer, list);
list_del(&video->next_frm->list);
- video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
- return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+ video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
}
/* schedule the next buffer which is available on dma queue */
@@ -448,8 +448,8 @@ void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
video->cur_frm = video->next_frm;
list_del(&video->next_frm->list);
- video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
- addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+ video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
video->ops->queue(vpfe_dev, addr);
video->state = VPFE_VIDEO_BUFFER_QUEUED;
}
@@ -460,7 +460,7 @@ void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
struct vpfe_device *vpfe_dev = video->vpfe_dev;
unsigned long addr;
- addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
addr += video->field_off;
video->ops->queue(vpfe_dev, addr);
}
@@ -470,8 +470,8 @@ void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
{
struct vpfe_pipeline *pipe = &video->pipe;
- v4l2_get_timestamp(&video->cur_frm->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_DONE);
+ v4l2_get_timestamp(&video->cur_frm->vb.timestamp);
+ vb2_buffer_done(&video->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
video->cur_frm = video->next_frm;
}
@@ -1078,7 +1078,7 @@ vpfe_g_dv_timings(struct file *file, void *fh,
* the buffer nbuffers and buffer size
*/
static int
-vpfe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+vpfe_buffer_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -1138,12 +1138,13 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
static void vpfe_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
/* Get the file handle object and device object */
struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vpfe_video_device *video = fh->video;
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer *buf = container_of(vbuf,
struct vpfe_cap_buffer, vb);
unsigned long flags;
unsigned long empty;
@@ -1203,10 +1204,10 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Remove buffer from the buffer queue */
list_del(&video->cur_frm->list);
/* Mark state of the current frame to active */
- video->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ video->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
/* Initialize field_id and started member */
video->field_id = 0;
- addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+ addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
video->ops->queue(vpfe_dev, addr);
video->state = VPFE_VIDEO_BUFFER_QUEUED;
@@ -1214,10 +1215,12 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret) {
struct vpfe_cap_buffer *buf, *tmp;
- vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
}
goto unlock_out;
}
@@ -1234,7 +1237,8 @@ streamoff:
static int vpfe_buffer_init(struct vb2_buffer *vb)
{
- struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpfe_cap_buffer *buf = container_of(vbuf,
struct vpfe_cap_buffer, vb);
INIT_LIST_HEAD(&buf->list);
@@ -1249,13 +1253,14 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
if (video->cur_frm == video->next_frm) {
- vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
} else {
if (video->cur_frm != NULL)
- vb2_buffer_done(&video->cur_frm->vb,
+ vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
if (video->next_frm != NULL)
- vb2_buffer_done(&video->next_frm->vb,
+ vb2_buffer_done(&video->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -1263,16 +1268,18 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
video->next_frm = list_entry(video->dma_queue.next,
struct vpfe_cap_buffer, list);
list_del(&video->next_frm->list);
- vb2_buffer_done(&video->next_frm->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&video->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
}
}
static void vpfe_buf_cleanup(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vpfe_video_device *video = fh->video;
struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer *buf = container_of(vbuf,
struct vpfe_cap_buffer, vb);
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index 1b1b6c4a56b7..673cefe3ef61 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -22,6 +22,7 @@
#ifndef _DAVINCI_VPFE_VIDEO_H
#define _DAVINCI_VPFE_VIDEO_H
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
struct vpfe_device;
@@ -72,7 +73,7 @@ struct vpfe_pipeline {
container_of(vdev, struct vpfe_video_device, video_dev)
struct vpfe_cap_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index bc78da014c33..f2dca69c2bc0 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -181,7 +181,7 @@ static void deregister_from_lirc(struct sasem_context *context)
if (retval)
dev_err(&context->dev->dev,
"%s: unable to deregister from lirc (%d)\n",
- __func__, retval);
+ __func__, retval);
else
dev_info(&context->dev->dev,
"Deregistered Sasem driver (minor:%d)\n", minor);
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 465796a686c4..64a7b2fc5289 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -109,17 +109,9 @@ static bool iommap;
static int ioshift;
static bool softcarrier = true;
static bool share_irq;
-static bool debug;
static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
static bool txsense; /* 0 = active high, 1 = active low */
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \
- fmt, ## args); \
- } while (0)
-
/* forward declarations */
static long send_pulse_irdeo(unsigned long length);
static long send_pulse_homebrew(unsigned long length);
@@ -352,10 +344,9 @@ static int init_timing_params(unsigned int new_duty_cycle,
/* Derive pulse and space from the period */
pulse_width = period * duty_cycle / 100;
space_width = period - pulse_width;
- dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
- "clk/jiffy=%ld, pulse=%ld, space=%ld\n",
- freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
- pulse_width, space_width);
+ pr_debug("in init_timing_params, freq=%d, duty_cycle=%d, clk/jiffy=%ld, pulse=%ld, space=%ld, conv_us_to_clocks=%ld\n",
+ freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
+ pulse_width, space_width, conv_us_to_clocks);
return 0;
}
#else /* ! USE_RDTSC */
@@ -377,8 +368,8 @@ static int init_timing_params(unsigned int new_duty_cycle,
period = 256 * 1000000L / freq;
pulse_width = period * duty_cycle / 100;
space_width = period - pulse_width;
- dprintk("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
- freq, pulse_width, space_width);
+ pr_debug("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
+ freq, pulse_width, space_width);
return 0;
}
#endif /* USE_RDTSC */
@@ -500,7 +491,7 @@ static void rbwrite(int l)
{
if (lirc_buffer_full(&rbuf)) {
/* no new signals will be accepted */
- dprintk("Buffer overrun\n");
+ pr_debug("Buffer overrun\n");
return;
}
lirc_buffer_write(&rbuf, (void *)&l);
@@ -790,7 +781,7 @@ static int lirc_serial_probe(struct platform_device *dev)
dev_info(&dev->dev, "Manually using active %s receiver\n",
sense ? "low" : "high");
- dprintk("Interrupt %d, port %04x obtained\n", irq, io);
+ dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
return 0;
}
@@ -895,7 +886,7 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return -ENOIOCTLCMD;
case LIRC_SET_SEND_DUTY_CYCLE:
- dprintk("SET_SEND_DUTY_CYCLE\n");
+ pr_debug("SET_SEND_DUTY_CYCLE\n");
if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE))
return -ENOIOCTLCMD;
@@ -907,7 +898,7 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return init_timing_params(value, freq);
case LIRC_SET_SEND_CARRIER:
- dprintk("SET_SEND_CARRIER\n");
+ pr_debug("SET_SEND_CARRIER\n");
if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER))
return -ENOIOCTLCMD;
@@ -1102,7 +1093,7 @@ static void __exit lirc_serial_exit_module(void)
{
lirc_unregister_driver(driver.minor);
lirc_serial_exit();
- dprintk("cleaned up module\n");
+ pr_debug("cleaned up module\n");
}
@@ -1153,6 +1144,3 @@ MODULE_PARM_DESC(txsense, "Sense of transmitter circuit"
module_param(softcarrier, bool, S_IRUGO);
MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
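
The driver-private debug module parameter and dprintk() macro are dropped in favour of the generic pr_debug()/dev_dbg() helpers, which are compiled out by default and can be switched on per file or per module through dynamic debug. A minimal sketch, assuming CONFIG_DYNAMIC_DEBUG and hypothetical names:

/* give every pr_* message a module-name prefix; must precede the includes */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/device.h>

static void report_setup(struct device *dev, int irq, unsigned int io)
{
        pr_debug("buffer overrun\n");
        dev_dbg(dev, "Interrupt %d, port %04x obtained\n", irq, io);
}

With dynamic debug these sites are enabled at run time, for example with echo 'module lirc_serial +p' > /sys/kernel/debug/dynamic_debug/control, which is why a private debug parameter is no longer needed.
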
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index bd3662ab2db0..aa76ccda5b42 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1440,12 +1440,13 @@ static int iss_probe(struct platform_device *pdev)
iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_REVISION));
/* Interrupt */
- iss->irq_num = platform_get_irq(pdev, 0);
- if (iss->irq_num <= 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
dev_err(iss->dev, "No IRQ resource\n");
ret = -ENODEV;
goto error_iss;
}
+ iss->irq_num = ret;
if (devm_request_irq(iss->dev, iss->irq_num, iss_isr, IRQF_SHARED,
"OMAP4 ISS", iss)) {
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 97d3faa722aa..2a0158bb4974 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -287,7 +287,7 @@ iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
*/
static int iss_video_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
+ const void *parg,
unsigned int *count, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -310,7 +310,8 @@ static int iss_video_queue_setup(struct vb2_queue *vq,
static void iss_video_buf_cleanup(struct vb2_buffer *vb)
{
- struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
if (buffer->iss_addr)
buffer->iss_addr = 0;
@@ -318,8 +319,9 @@ static void iss_video_buf_cleanup(struct vb2_buffer *vb)
static int iss_video_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
- struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
struct iss_video *video = vfh->video;
unsigned long size = vfh->format.fmt.pix.sizeimage;
dma_addr_t addr;
@@ -341,9 +343,10 @@ static int iss_video_buf_prepare(struct vb2_buffer *vb)
static void iss_video_buf_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
struct iss_video *video = vfh->video;
- struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
unsigned long flags;
bool empty;
@@ -419,7 +422,6 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
enum iss_pipeline_state state;
struct iss_buffer *buf;
unsigned long flags;
- struct timespec ts;
spin_lock_irqsave(&video->qlock, flags);
if (WARN_ON(list_empty(&video->dmaqueue))) {
@@ -432,9 +434,7 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
list_del(&buf->list);
spin_unlock_irqrestore(&video->qlock, flags);
- ktime_get_ts(&ts);
- buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
- buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ v4l2_get_timestamp(&buf->vb.timestamp);
/* Do frame number propagation only if this is the output video node.
* Frame number either comes from the CSI receivers or it gets
@@ -443,12 +443,12 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
* first, so the input number might lag behind by 1 in some cases.
*/
if (video == pipe->output && !pipe->do_propagation)
- buf->vb.v4l2_buf.sequence =
+ buf->vb.sequence =
atomic_inc_return(&pipe->frame_number);
else
- buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+ buf->vb.sequence = atomic_read(&pipe->frame_number);
- vb2_buffer_done(&buf->vb, pipe->error ?
+ vb2_buffer_done(&buf->vb.vb2_buf, pipe->error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
pipe->error = false;
@@ -479,7 +479,7 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
list);
spin_unlock_irqrestore(&video->qlock, flags);
- buf->vb.state = VB2_BUF_STATE_ACTIVE;
+ buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
return buf;
}
@@ -502,7 +502,7 @@ void omap4iss_video_cancel_stream(struct iss_video *video)
buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
vb2_queue_error(video->queue);
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index f11fce2cb977..41532eda1277 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -18,7 +18,7 @@
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#define ISS_VIDEO_DRIVER_NAME "issvideo"
@@ -117,12 +117,12 @@ static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
*/
struct iss_buffer {
/* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
dma_addr_t iss_addr;
};
-#define to_iss_buffer(buf) container_of(buf, struct iss_buffer, buffer)
+#define to_iss_buffer(buf) container_of(buf, struct iss_buffer, vb)
enum iss_video_dmaqueue_flags {
/* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 20a3f8eeb264..9dba16f1fac4 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -946,7 +946,6 @@ MODULE_DEVICE_TABLE(of, spinand_dt);
static struct spi_driver spinand_driver = {
.driver = {
.name = "mt29f",
- .owner = THIS_MODULE,
.of_match_table = spinand_dt,
},
.probe = spinand_probe,
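
The .owner = THIS_MODULE initializers removed from these spi_driver definitions are redundant: the SPI core fills in the owner at registration time (spi_register_driver() is now a macro that passes THIS_MODULE to __spi_register_driver()). A minimal sketch of a driver written without the field, with hypothetical names:

#include <linux/module.h>
#include <linux/spi/spi.h>

static int my_spi_probe(struct spi_device *spi)
{
        return 0;
}

static struct spi_driver my_spi_driver = {
        .driver = {
                .name = "my-spi-dev",
                /* no .owner: the SPI core sets it during registration */
        },
        .probe = my_spi_probe,
};
module_spi_driver(my_spi_driver);

MODULE_LICENSE("GPL");
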
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a0cdbf35dcb1..a5d319e4aae6 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -227,10 +227,6 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
if (IS_ERR(d->clk) || !old)
goto out;
- /* Not requesting clock rates below 1.8432Mhz */
- if (baud < 115200)
- baud = 115200;
-
clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, baud * 16);
ret = clk_set_rate(d->clk, rate);
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 536a33b99be9..88246f7e435a 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1362,7 +1362,7 @@ static struct spi_driver ifx_spi_driver = {
.driver = {
.name = DRVNAME,
.pm = &ifx_spi_pm,
- .owner = THIS_MODULE},
+ },
.probe = ifx_spi_spi_probe,
.shutdown = ifx_spi_spi_shutdown,
.remove = ifx_spi_spi_remove,
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 077377259a2c..5c4c280b3207 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -904,7 +904,6 @@ static SIMPLE_DEV_PM_OPS(max3100_pm_ops, max3100_suspend, max3100_resume);
static struct spi_driver max3100_driver = {
.driver = {
.name = "max3100",
- .owner = THIS_MODULE,
.pm = MAX3100_PM_OPS,
},
.probe = max3100_probe,
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 182549f55904..d45133056f51 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1338,7 +1338,6 @@ MODULE_DEVICE_TABLE(spi, max310x_id_table);
static struct spi_driver max310x_uart_driver = {
.driver = {
.name = MAX310X_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(max310x_dt_ids),
.pm = &max310x_pm_ops,
},
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 1ae8aa698fcb..edb5305b9d4d 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1360,7 +1360,6 @@ MODULE_DEVICE_TABLE(spi, sc16is7xx_spi_id_table);
static struct spi_driver sc16is7xx_spi_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sc16is7xx_dt_ids),
},
.probe = sc16is7xx_spi_probe,
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index d617c39a0052..51d4a1703af2 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -41,7 +41,7 @@
* videobuf2 queue operations
*/
-static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+static int uvc_queue_setup(struct vb2_queue *vq, const void *parg,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -61,9 +61,10 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
- if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
return -EINVAL;
@@ -75,7 +76,7 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
buf->state = UVC_BUF_STATE_QUEUED;
buf->mem = vb2_plane_vaddr(vb, 0);
buf->length = vb2_plane_size(vb, 0);
- if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
buf->bytesused = 0;
else
buf->bytesused = vb2_get_plane_payload(vb, 0);
@@ -86,7 +87,8 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
@@ -98,7 +100,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
* directly. The next QBUF call will fail with -ENODEV.
*/
buf->state = UVC_BUF_STATE_ERROR;
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&queue->irqlock, flags);
@@ -242,7 +244,7 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
queue);
list_del(&buf->queue);
buf->state = UVC_BUF_STATE_ERROR;
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
}
/* This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_queue_buffer and the disconnection event that
@@ -314,7 +316,7 @@ struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
buf->length != buf->bytesused) {
buf->state = UVC_BUF_STATE_QUEUED;
- vb2_set_plane_payload(&buf->buf, 0, 0);
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
return buf;
}
@@ -325,12 +327,12 @@ struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
else
nextbuf = NULL;
- buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
- buf->buf.v4l2_buf.sequence = queue->sequence++;
- v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
+ buf->buf.field = V4L2_FIELD_NONE;
+ buf->buf.sequence = queue->sequence++;
+ v4l2_get_timestamp(&buf->buf.timestamp);
- vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
return nextbuf;
}
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 01ca9eab3481..ac461a9a1a70 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/videodev2.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
/* Maximum frame size in bytes, for sanity checking. */
#define UVC_MAX_FRAME_SIZE (16*1024*1024)
@@ -26,7 +26,7 @@ enum uvc_buffer_state {
};
struct uvc_buffer {
- struct vb2_buffer buf;
+ struct vb2_v4l2_buffer buf;
struct list_head queue;
enum uvc_buffer_state state;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index fc1fd403973a..bd98706d1ce9 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1944,7 +1944,6 @@ static struct spi_driver max3421_driver = {
.remove = max3421_remove,
.driver = {
.name = "max3421-hcd",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 454017928ed0..850d86ca685b 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -33,3 +33,4 @@ menuconfig VFIO
source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
+source "virt/lib/Kconfig"
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 579d83bf5358..02912f180c6d 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -2,6 +2,7 @@ config VFIO_PCI
tristate "VFIO support for PCI devices"
depends on VFIO && PCI && EVENTFD
select VFIO_VIRQFD
+ select IRQ_BYPASS_MANAGER
help
Support for the PCI VFIO bus driver. This is required to make
use of PCI drivers using the VFIO framework.
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 1f577b4ac126..3b3ba15558b7 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -319,6 +319,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
if (vdev->ctx[vector].trigger) {
free_irq(irq, vdev->ctx[vector].trigger);
+ irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
@@ -360,6 +361,14 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
return ret;
}
+ vdev->ctx[vector].producer.token = trigger;
+ vdev->ctx[vector].producer.irq = irq;
+ ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
+ if (unlikely(ret))
+ dev_info(&pdev->dev,
+ "irq bypass producer (token %p) registration fails: %d\n",
+ vdev->ctx[vector].producer.token, ret);
+
vdev->ctx[vector].trigger = trigger;
return 0;
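
Each MSI/MSI-X vector now registers an irq_bypass_producer keyed by the trigger eventfd, so an IRQ bypass consumer (for instance KVM's posted-interrupt support) can pair with it; registration failure is only reported, never fatal. A rough sketch of the producer life cycle, with a hypothetical my_vector_ctx and assuming the <linux/irqbypass.h> API introduced alongside this series:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/irqbypass.h>

struct my_vector_ctx {
        struct eventfd_ctx *trigger;
        struct irq_bypass_producer producer;
};

static int my_enable_vector(struct my_vector_ctx *ctx, int irq,
                            irq_handler_t handler)
{
        int ret;

        ret = request_irq(irq, handler, 0, "my-vector", ctx);
        if (ret)
                return ret;

        /* the token must match what the consumer side registers (the eventfd) */
        ctx->producer.token = ctx->trigger;
        ctx->producer.irq = irq;
        ret = irq_bypass_register_producer(&ctx->producer);
        if (ret)
                pr_info("irq bypass unavailable for irq %d: %d\n", irq, ret);

        return 0;       /* bypass is an optimization, not a requirement */
}

static void my_disable_vector(struct my_vector_ctx *ctx, int irq)
{
        free_irq(irq, ctx);
        irq_bypass_unregister_producer(&ctx->producer);
}
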
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index ae0e1b4c1711..0e7394f8f69b 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/irqbypass.h>
#ifndef VFIO_PCI_PRIVATE_H
#define VFIO_PCI_PRIVATE_H
@@ -29,6 +30,7 @@ struct vfio_pci_irq_ctx {
struct virqfd *mask;
char *name;
bool masked;
+ struct irq_bypass_producer producer;
};
struct vfio_pci_device {
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 5f897f99cc9b..5cca8ce45d4d 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -556,7 +556,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
static struct spi_driver ams369fg06_driver = {
.driver = {
.name = "ams369fg06",
- .owner = THIS_MODULE,
.pm = &ams369fg06_pm_ops,
},
.probe = ams369fg06_probe,
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c37a8ccd1f..d7c239ea3d09 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -598,7 +598,6 @@ static int corgi_lcd_remove(struct spi_device *spi)
static struct spi_driver corgi_lcd_driver = {
.driver = {
.name = "corgi-lcd",
- .owner = THIS_MODULE,
.pm = &corgi_lcd_pm_ops,
},
.probe = corgi_lcd_probe,
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index e7f0890cc211..a9e9cef20ed6 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -536,7 +536,6 @@ static int ili922x_remove(struct spi_device *spi)
static struct spi_driver ili922x_driver = {
.driver = {
.name = "ili922x",
- .owner = THIS_MODULE,
},
.probe = ili922x_probe,
.remove = ili922x_remove,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 5fa2649c9631..e6054e2492c5 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -255,7 +255,6 @@ static void l4f00242t03_shutdown(struct spi_device *spi)
static struct spi_driver l4f00242t03_driver = {
.driver = {
.name = "l4f00242t03",
- .owner = THIS_MODULE,
},
.probe = l4f00242t03_probe,
.remove = l4f00242t03_remove,
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index f71eaf10c4eb..677f8abba27c 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -797,7 +797,6 @@ static void ld9040_shutdown(struct spi_device *spi)
static struct spi_driver ld9040_driver = {
.driver = {
.name = "ld9040",
- .owner = THIS_MODULE,
.pm = &ld9040_pm_ops,
},
.probe = ld9040_probe,
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 14590c54aedf..4237aaa7f269 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -192,7 +192,6 @@ static int lms283gf05_probe(struct spi_device *spi)
static struct spi_driver lms283gf05_driver = {
.driver = {
.name = "lms283gf05",
- .owner = THIS_MODULE,
},
.probe = lms283gf05_probe,
};
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index 7e3810308c3e..8aa3e7662496 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -422,7 +422,6 @@ static void lms501kf03_shutdown(struct spi_device *spi)
static struct spi_driver lms501kf03_driver = {
.driver = {
.name = "lms501kf03",
- .owner = THIS_MODULE,
.pm = &lms501kf03_pm_ops,
},
.probe = lms501kf03_probe,
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 383f550e165e..885612cc1008 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -295,7 +295,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
static struct spi_driver ltv350qv_driver = {
.driver = {
.name = "ltv350qv",
- .owner = THIS_MODULE,
.pm = &ltv350qv_pm_ops,
},
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 28bfa127fee4..3c4a22a3063a 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -842,7 +842,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
static struct spi_driver s6e63m0_driver = {
.driver = {
.name = "s6e63m0",
- .owner = THIS_MODULE,
.pm = &s6e63m0_pm_ops,
},
.probe = s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 30afce33ef2a..eab1f842f9c0 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -437,7 +437,6 @@ static void tdo24m_shutdown(struct spi_device *spi)
static struct spi_driver tdo24m_driver = {
.driver = {
.name = "tdo24m",
- .owner = THIS_MODULE,
.pm = &tdo24m_pm_ops,
},
.probe = tdo24m_probe,
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index f08d641ccd01..6a41ea92737a 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -263,7 +263,6 @@ static SIMPLE_DEV_PM_OPS(tosa_lcd_pm_ops, tosa_lcd_suspend, tosa_lcd_resume);
static struct spi_driver tosa_lcd_driver = {
.driver = {
.name = "tosa-lcd",
- .owner = THIS_MODULE,
.pm = &tosa_lcd_pm_ops,
},
.probe = tosa_lcd_probe,
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index d538947a67d3..242a9948f57f 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -251,7 +251,6 @@ static SIMPLE_DEV_PM_OPS(vgg2432a4_pm_ops, vgg2432a4_suspend, vgg2432a4_resume);
static struct spi_driver vgg2432a4_driver = {
.driver = {
.name = "VGG2432A4",
- .owner = THIS_MODULE,
.pm = &vgg2432a4_pm_ops,
},
.probe = vgg2432a4_probe,
diff --git a/drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c b/drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c
index 998978b08f5e..f7e85d1c9f9c 100644
--- a/drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c
+++ b/drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c
@@ -175,7 +175,6 @@ static int tpohvga_probe(struct spi_device *spi)
static struct spi_driver panel_tpohvga_driver = {
.driver = {
.name = "tpo-hvga",
- .owner = THIS_MODULE,
},
.probe = tpohvga_probe,
};
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index 803fee618d57..0e4cee9a8d79 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -603,7 +603,6 @@ static int mipid_spi_remove(struct spi_device *spi)
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = MIPID_MODULE_NAME,
- .owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
.remove = mipid_spi_remove,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
index 6a1b6a89a928..18eb60e9c9ec 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
@@ -391,7 +391,6 @@ static struct spi_driver lb035q02_spi_driver = {
.remove = lb035q02_panel_spi_remove,
.driver = {
.name = "panel_lgphilips_lb035q02",
- .owner = THIS_MODULE,
.of_match_table = lb035q02_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
index ccf3f4f3c703..8a928c9a2fc9 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
@@ -421,7 +421,6 @@ MODULE_DEVICE_TABLE(of, nec_8048_of_match);
static struct spi_driver nec_8048_driver = {
.driver = {
.name = "panel-nec-nl8048hl11",
- .owner = THIS_MODULE,
.pm = NEC_8048_PM_OPS,
.of_match_table = nec_8048_of_match,
.suppress_bind_attrs = true,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index c581231c74a5..31efcca801bd 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -903,7 +903,6 @@ MODULE_DEVICE_TABLE(of, acx565akm_of_match);
static struct spi_driver acx565akm_driver = {
.driver = {
.name = "acx565akm",
- .owner = THIS_MODULE,
.of_match_table = acx565akm_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
index 9edc51133c59..4d657f3ab679 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -498,7 +498,6 @@ static struct spi_driver td028ttec1_spi_driver = {
.driver = {
.name = "panel-tpo-td028ttec1",
- .owner = THIS_MODULE,
.of_match_table = td028ttec1_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
index 79e4a029aab9..68e3b68a2920 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
@@ -670,7 +670,6 @@ MODULE_DEVICE_TABLE(of, tpo_td043_of_match);
static struct spi_driver tpo_td043_spi_driver = {
.driver = {
.name = "panel-tpo-td043mtea1",
- .owner = THIS_MODULE,
.pm = &tpo_td043_spi_pm,
.of_match_table = tpo_td043_of_match,
.suppress_bind_attrs = true,
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
index caafb1722783..9f4a86b754ba 100644
--- a/drivers/w1/slaves/w1_bq27000.c
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -15,7 +15,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <linux/power/bq27x00_battery.h>
+#include <linux/power/bq27xxx_battery.h>
#include "../w1.h"
#include "../w1_int.h"
@@ -39,9 +39,10 @@ static int w1_bq27000_read(struct device *dev, unsigned int reg)
return val;
}
-static struct bq27000_platform_data bq27000_battery_info = {
+static struct bq27xxx_platform_data bq27000_battery_info = {
.read = w1_bq27000_read,
.name = "bq27000-battery",
+ .chip = BQ27000,
};
static int w1_bq27000_add_slave(struct w1_slave *sl)
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 6b747394f6f5..7bf835f85bc8 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -161,7 +161,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
BUG();
- res = posix_lock_file_wait(filp, fl);
+ res = locks_lock_file_wait(filp, fl);
if (res < 0)
goto out;
@@ -232,7 +232,7 @@ out_unlock:
fl_type = fl->fl_type;
fl->fl_type = F_UNLCK;
/* Even if this fails we want to return the remote error */
- posix_lock_file_wait(filp, fl);
+ locks_lock_file_wait(filp, fl);
fl->fl_type = fl_type;
}
out:
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 6706bde9ad1b..a2cb0c254060 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -228,12 +228,12 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
file, lock_cmd, wait, fl);
if (!err) {
- err = flock_lock_file_wait(file, fl);
+ err = locks_lock_file_wait(file, fl);
if (err) {
ceph_lock_message(CEPH_LOCK_FLOCK,
CEPH_MDS_OP_SETFILELOCK,
file, CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on flock_lock_file_wait, undid lock", err);
+ dout("got %d on locks_lock_file_wait, undid lock", err);
}
}
return err;
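
These filesystem callers switch from posix_lock_file_wait()/flock_lock_file_wait() to the new locks_lock_file_wait() helper, which looks at fl->fl_flags and dispatches to the POSIX or flock path itself. A minimal sketch of a caller after the conversion, with a hypothetical my_flock and assuming the helper added by this series:

#include <linux/fs.h>

/* apply the lock locally once the server (or DLM) has granted it */
static int my_flock(struct file *file, int cmd, struct file_lock *fl)
{
        /* ... negotiate the lock with the remote side first ... */

        /* one call covers both FL_POSIX and FL_FLOCK requests */
        return locks_lock_file_wait(file, fl);
}
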
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index f4cf200b3c76..6908080e9b6d 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -42,7 +42,7 @@ cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
goto error;
/* attach the data */
- key->payload.data = payload;
+ key->payload.data[0] = payload;
ret = 0;
error:
@@ -52,7 +52,7 @@ error:
static void
cifs_spnego_key_destroy(struct key *key)
{
- kfree(key->payload.data);
+ kfree(key->payload.data[0]);
}
@@ -167,7 +167,7 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
#ifdef CONFIG_CIFS_DEBUG2
if (cifsFYI && !IS_ERR(spnego_key)) {
- struct cifs_spnego_msg *msg = spnego_key->payload.data;
+ struct cifs_spnego_msg *msg = spnego_key->payload.data[0];
cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U,
msg->secblob_len + msg->sesskey_len));
}
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 1ea780bc6376..3f93125916bf 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -58,16 +58,15 @@ cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
* dereference payload.data!
*/
if (prep->datalen <= sizeof(key->payload)) {
- key->payload.value = 0;
- memcpy(&key->payload.value, prep->data, prep->datalen);
- key->datalen = prep->datalen;
- return 0;
+ key->payload.data[0] = NULL;
+ memcpy(&key->payload, prep->data, prep->datalen);
+ } else {
+ payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
+ if (!payload)
+ return -ENOMEM;
+ key->payload.data[0] = payload;
}
- payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
- if (!payload)
- return -ENOMEM;
- key->payload.data = payload;
key->datalen = prep->datalen;
return 0;
}
@@ -76,7 +75,7 @@ static inline void
cifs_idmap_key_destroy(struct key *key)
{
if (key->datalen > sizeof(key->payload))
- kfree(key->payload.data);
+ kfree(key->payload.data[0]);
}
static struct key_type cifs_idmap_key_type = {
@@ -233,8 +232,8 @@ id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
* it could be.
*/
ksid = sidkey->datalen <= sizeof(sidkey->payload) ?
- (struct cifs_sid *)&sidkey->payload.value :
- (struct cifs_sid *)sidkey->payload.data;
+ (struct cifs_sid *)&sidkey->payload :
+ (struct cifs_sid *)sidkey->payload.data[0];
ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32));
if (ksid_size > sidkey->datalen) {
@@ -307,14 +306,14 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
if (sidtype == SIDOWNER) {
kuid_t uid;
uid_t id;
- memcpy(&id, &sidkey->payload.value, sizeof(uid_t));
+ memcpy(&id, &sidkey->payload.data[0], sizeof(uid_t));
uid = make_kuid(&init_user_ns, id);
if (uid_valid(uid))
fuid = uid;
} else {
kgid_t gid;
gid_t id;
- memcpy(&id, &sidkey->payload.value, sizeof(gid_t));
+ memcpy(&id, &sidkey->payload.data[0], sizeof(gid_t));
gid = make_kgid(&init_user_ns, id);
if (gid_valid(gid))
fgid = gid;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 773f4dc77630..3f2228570d44 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2325,13 +2325,14 @@ static int
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
- char *desc, *delim, *payload;
+ const char *delim, *payload;
+ char *desc;
ssize_t len;
struct key *key;
struct TCP_Server_Info *server = ses->server;
struct sockaddr_in *sa;
struct sockaddr_in6 *sa6;
- struct user_key_payload *upayload;
+ const struct user_key_payload *upayload;
desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
if (!desc)
@@ -2374,14 +2375,14 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
}
down_read(&key->sem);
- upayload = key->payload.data;
+ upayload = user_key_payload(key);
if (IS_ERR_OR_NULL(upayload)) {
rc = upayload ? PTR_ERR(upayload) : -EINVAL;
goto out_key_put;
}
/* find first : in payload */
- payload = (char *)upayload->data;
+ payload = upayload->data;
delim = strnchr(payload, upayload->datalen, ':');
cifs_dbg(FYI, "payload=%s\n", payload);
if (!delim) {
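
With the keyrings rework, key->payload.data becomes a small array (data[0] carries what used to be payload.data), and user/logon keys are read through the user_key_payload() accessor, which must be used under key->sem or RCU. A short sketch of reading such a key, with a hypothetical read_user_key and assuming the post-conversion API:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/key.h>
#include <keys/user-type.h>

static int read_user_key(struct key *key, char *out, size_t outlen)
{
        const struct user_key_payload *ukp;
        int len;

        down_read(&key->sem);           /* payload stays stable under the sem */
        ukp = user_key_payload(key);
        if (!ukp) {
                up_read(&key->sem);
                return -ENOKEY;
        }
        len = min_t(size_t, outlen, ukp->datalen);
        memcpy(out, ukp->data, len);
        up_read(&key->sem);
        return len;
}
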
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 62203c387db4..47c5c97e2dd3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1553,7 +1553,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
out:
if (flock->fl_flags & FL_POSIX && !rc)
- rc = posix_lock_file_wait(file, flock);
+ rc = locks_lock_file_wait(file, flock);
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index bce6fdcd5d48..59727e32ed0f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -988,7 +988,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
goto out;
}
- msg = spnego_key->payload.data;
+ msg = spnego_key->payload.data[0];
/*
* check version field to make sure that cifs.upcall is
* sending us a response in an expected form
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 597a417ba94d..61276929d139 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -660,7 +660,7 @@ ssetup_ntlmssp_authenticate:
goto ssetup_exit;
}
- msg = spnego_key->payload.data;
+ msg = spnego_key->payload.data[0];
/*
* check version field to make sure that cifs.upcall is
* sending us a response in an expected form
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 5532f097f6da..d401425f602a 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -145,7 +145,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
send_op(op);
if (xop->callback == NULL) {
- rv = wait_event_killable(recv_wq, (op->done != 0));
+ rv = wait_event_interruptible(recv_wq, (op->done != 0));
if (rv == -ERESTARTSYS) {
log_debug(ls, "dlm_posix_lock: wait killed %llx",
(unsigned long long)number);
@@ -172,7 +172,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
rv = op->info.rv;
if (!rv) {
- if (posix_lock_file_wait(file, fl) < 0)
+ if (locks_lock_file_wait(file, fl) < 0)
log_error(ls, "dlm_posix_lock: vfs lock error %llx",
(unsigned long long)number);
}
@@ -262,7 +262,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
/* cause the vfs unlock to return ENOENT if lock is not found */
fl->fl_flags |= FL_EXISTS;
- rv = posix_lock_file_wait(file, fl);
+ rv = locks_lock_file_wait(file, fl);
if (rv == -ENOENT) {
rv = 0;
goto out_free;
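
The plock wait switches from wait_event_killable() to wait_event_interruptible(): any pending signal now breaks the wait with -ERESTARTSYS, not only fatal ones, and the existing error path handles it. A tiny sketch of the difference, with hypothetical names:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(reply_wq);
static int reply_done;

static int wait_for_reply(void)
{
        /* interruptible: returns -ERESTARTSYS on any signal; the killable
         * variant only reacts to fatal signals */
        return wait_event_interruptible(reply_wq, reply_done != 0);
}
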
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 5ba029e627cc..7b39260c7bba 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -86,7 +86,7 @@ ecryptfs_get_encrypted_key_payload_data(struct key *key)
{
if (key->type == &key_type_encrypted)
return (struct ecryptfs_auth_tok *)
- (&((struct encrypted_key_payload *)key->payload.data)->payload_data);
+ (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
else
return NULL;
}
@@ -117,8 +117,7 @@ ecryptfs_get_key_payload_data(struct key *key)
auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
if (!auth_tok)
- return (struct ecryptfs_auth_tok *)
- (((struct user_key_payload *)key->payload.data)->data);
+ return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
else
return auth_tok;
}
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 1d510c11b100..5c52c79dea46 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -121,7 +121,7 @@ int _ext4_get_encryption_info(struct inode *inode)
struct key *keyring_key = NULL;
struct ext4_encryption_key *master_key;
struct ext4_encryption_context ctx;
- struct user_key_payload *ukp;
+ const struct user_key_payload *ukp;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct crypto_ablkcipher *ctfm;
const char *cipher_str;
@@ -209,7 +209,7 @@ retry:
}
crypt_info->ci_keyring_key = keyring_key;
BUG_ON(keyring_key->type != &key_type_logon);
- ukp = ((struct user_key_payload *)keyring_key->payload.data);
+ ukp = user_key_payload(keyring_key);
if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
res = -EINVAL;
goto out;
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index c5a38e352a80..f661d80474be 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -47,7 +47,8 @@ repeat:
/*
* We guarantee no failure on the returned page.
*/
-struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+ bool is_meta)
{
struct address_space *mapping = META_MAPPING(sbi);
struct page *page;
@@ -58,6 +59,9 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
.blk_addr = index,
.encrypted_page = NULL,
};
+
+ if (unlikely(!is_meta))
+ fio.rw &= ~REQ_META;
repeat:
page = grab_cache_page(mapping, index);
if (!page) {
@@ -91,6 +95,17 @@ out:
return page;
}
+struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ return __get_meta_page(sbi, index, true);
+}
+
+/* for POR only */
+struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ return __get_meta_page(sbi, index, false);
+}
+
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
switch (type) {
@@ -125,7 +140,8 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
/*
* Readahead CP/NAT/SIT/SSA pages
*/
-int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ int type, bool sync)
{
block_t prev_blk_addr = 0;
struct page *page;
@@ -133,10 +149,13 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = READ_SYNC | REQ_META | REQ_PRIO,
+ .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
.encrypted_page = NULL,
};
+ if (unlikely(type == META_POR))
+ fio.rw &= ~REQ_META;
+
for (; nrpages-- > 0; blkno++) {
if (!is_valid_blkaddr(sbi, blkno, type))
@@ -196,7 +215,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
f2fs_put_page(page, 0);
if (readahead)
- ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
+ ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
}
static int f2fs_write_meta_page(struct page *page,
@@ -257,7 +276,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
long nr_to_write)
{
struct address_space *mapping = META_MAPPING(sbi);
- pgoff_t index = 0, end = LONG_MAX;
+ pgoff_t index = 0, end = LONG_MAX, prev = LONG_MAX;
struct pagevec pvec;
long nwritten = 0;
struct writeback_control wbc = {
@@ -277,6 +296,13 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
+ if (prev == LONG_MAX)
+ prev = page->index - 1;
+ if (nr_to_write != LONG_MAX && page->index != prev + 1) {
+ pagevec_release(&pvec);
+ goto stop;
+ }
+
lock_page(page);
if (unlikely(page->mapping != mapping)) {
@@ -297,13 +323,14 @@ continue_unlock:
break;
}
nwritten++;
+ prev = page->index;
if (unlikely(nwritten >= nr_to_write))
break;
}
pagevec_release(&pvec);
cond_resched();
}
-
+stop:
if (nwritten)
f2fs_submit_merged_bio(sbi, type, WRITE);
@@ -495,7 +522,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
- ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP);
+ ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
for (i = 0; i < orphan_blocks; i++) {
struct page *page = get_meta_page(sbi, start_blk + i);
@@ -1000,6 +1027,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk = __start_cp_addr(sbi);
+ /* need to wait for end_io results */
+ wait_on_all_pages_writeback(sbi);
+ if (unlikely(f2fs_cp_error(sbi)))
+ return;
+
/* write out checkpoint buffer at block 0 */
update_meta_page(sbi, ckpt, start_blk++);
@@ -1109,6 +1141,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (cpc->reason == CP_RECOVERY)
f2fs_msg(sbi->sb, KERN_NOTICE,
"checkpoint: version = %llx", ckpt_ver);
+
+ /* do checkpoint periodically */
+ sbi->cp_expires = round_jiffies_up(jiffies + HZ * sbi->cp_interval);
out:
mutex_unlock(&sbi->cp_mutex);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
diff --git a/fs/f2fs/crypto_key.c b/fs/f2fs/crypto_key.c
index 9f77de2ef317..5de2d866a25c 100644
--- a/fs/f2fs/crypto_key.c
+++ b/fs/f2fs/crypto_key.c
@@ -122,7 +122,7 @@ int _f2fs_get_encryption_info(struct inode *inode)
struct key *keyring_key = NULL;
struct f2fs_encryption_key *master_key;
struct f2fs_encryption_context ctx;
- struct user_key_payload *ukp;
+ const struct user_key_payload *ukp;
struct crypto_ablkcipher *ctfm;
const char *cipher_str;
char raw_key[F2FS_MAX_KEY_SIZE];
@@ -199,7 +199,7 @@ retry:
}
crypt_info->ci_keyring_key = keyring_key;
BUG_ON(keyring_key->type != &key_type_logon);
- ukp = ((struct user_key_payload *)keyring_key->payload.data);
+ ukp = user_key_payload(keyring_key);
if (ukp->datalen != sizeof(struct f2fs_encryption_key)) {
res = -EINVAL;
goto out;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a82abe921b89..972eab7ac071 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -275,7 +275,8 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
return f2fs_reserve_block(dn, index);
}
-struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
+struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+ int rw, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
@@ -292,7 +293,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return read_mapping_page(mapping, index, NULL);
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -352,7 +353,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
return page;
f2fs_put_page(page, 0);
- page = get_read_data_page(inode, index, READ_SYNC);
+ page = get_read_data_page(inode, index, READ_SYNC, false);
if (IS_ERR(page))
return page;
@@ -372,12 +373,13 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
-struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
+struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+ bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
repeat:
- page = get_read_data_page(inode, index, READ_SYNC);
+ page = get_read_data_page(inode, index, READ_SYNC, for_write);
if (IS_ERR(page))
return page;
@@ -411,7 +413,7 @@ struct page *get_new_data_page(struct inode *inode,
struct dnode_of_data dn;
int err;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
* before exiting, we should make sure ipage will be released
@@ -439,7 +441,7 @@ repeat:
} else {
f2fs_put_page(page, 1);
- page = get_read_data_page(inode, index, READ_SYNC);
+ page = get_read_data_page(inode, index, READ_SYNC, true);
if (IS_ERR(page))
goto repeat;
@@ -447,9 +449,9 @@ repeat:
lock_page(page);
}
got_it:
- if (new_i_size &&
- i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+ if (new_i_size && i_size_read(inode) <
+ ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
+ i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
/* Only the directory inode sets new_i_size */
set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
}
@@ -489,8 +491,9 @@ alloc:
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
- i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+ i_size_write(dn->inode,
+ ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
/* direct IO doesn't use extent cache to maximize the performance */
f2fs_drop_largest_extent(dn->inode, fofs);
@@ -523,6 +526,9 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
while (dn.ofs_in_node < end_offset && len) {
block_t blkaddr;
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto sync_out;
+
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
if (__allocate_data_block(&dn))
@@ -565,6 +571,7 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
pgoff_t pgofs, end_offset;
int err = 0, ofs = 1;
@@ -595,40 +602,40 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
err = 0;
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR) {
- if (flag == F2FS_GET_BLOCK_BMAP) {
- err = -ENOENT;
- goto put_out;
- } else if (flag == F2FS_GET_BLOCK_READ ||
- flag == F2FS_GET_BLOCK_DIO) {
- goto put_out;
+
+ if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ if (create) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto put_out;
+ }
+ err = __allocate_data_block(&dn);
+ if (err)
+ goto put_out;
+ allocated = true;
+ map->m_flags = F2FS_MAP_NEW;
+ } else {
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ dn.data_blkaddr != NEW_ADDR) {
+ if (flag == F2FS_GET_BLOCK_BMAP)
+ err = -ENOENT;
+ goto put_out;
+ }
+
+ /*
+ * preallocated unwritten block should be mapped
+ * for fiemap.
+ */
+ if (dn.data_blkaddr == NEW_ADDR)
+ map->m_flags = F2FS_MAP_UNWRITTEN;
}
- /*
- * if it is in fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
- * mark it as mapped and unwritten block.
- */
}
- if (dn.data_blkaddr != NULL_ADDR) {
- map->m_flags = F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- if (dn.data_blkaddr == NEW_ADDR)
- map->m_flags |= F2FS_MAP_UNWRITTEN;
- } else if (create) {
- err = __allocate_data_block(&dn);
- if (err)
- goto put_out;
- allocated = true;
- map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- } else {
- if (flag == F2FS_GET_BLOCK_BMAP)
- err = -ENOENT;
- goto put_out;
- }
+ map->m_flags |= F2FS_MAP_MAPPED;
+ map->m_pblk = dn.data_blkaddr;
+ map->m_len = 1;
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
- map->m_len = 1;
dn.ofs_in_node++;
pgofs++;
@@ -647,23 +654,35 @@ get_next:
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR &&
- flag != F2FS_GET_BLOCK_FIEMAP)
- goto put_out;
-
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
}
if (maxblocks > map->m_len) {
block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (blkaddr == NULL_ADDR && create) {
- err = __allocate_data_block(&dn);
- if (err)
- goto sync_out;
- allocated = true;
- map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
+ if (create) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto sync_out;
+ }
+ err = __allocate_data_block(&dn);
+ if (err)
+ goto sync_out;
+ allocated = true;
+ map->m_flags |= F2FS_MAP_NEW;
+ blkaddr = dn.data_blkaddr;
+ } else {
+ /*
+ * we only merge preallocated unwritten blocks
+ * for fiemap.
+ */
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ blkaddr != NEW_ADDR)
+ goto sync_out;
+ }
}
+
/* Give more consecutive addresses for the readahead */
if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
@@ -752,6 +771,12 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret)
return ret;
+ if (f2fs_has_inline_data(inode)) {
+ ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
mutex_lock(&inode->i_mutex);
if (len >= isize) {
@@ -903,7 +928,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
- if (f2fs_map_blocks(inode, &map, 0, false))
+ if (f2fs_map_blocks(inode, &map, 0,
+ F2FS_GET_BLOCK_READ))
goto set_error_page;
}
got_it:
@@ -936,21 +962,14 @@ submit_and_realloc:
if (f2fs_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
- struct page *cpage;
ctx = f2fs_get_crypto_ctx(inode);
if (IS_ERR(ctx))
goto set_error_page;
/* wait the page to be moved by cleaning */
- cpage = find_lock_page(
- META_MAPPING(F2FS_I_SB(inode)),
- block_nr);
- if (cpage) {
- f2fs_wait_on_page_writeback(cpage,
- DATA);
- f2fs_put_page(cpage, 1);
- }
+ f2fs_wait_on_encrypted_page_writeback(
+ F2FS_I_SB(inode), block_nr);
}
bio = bio_alloc(GFP_KERNEL,
@@ -1012,6 +1031,9 @@ static int f2fs_read_data_pages(struct file *file,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = file->f_mapping->host;
+ struct page *page = list_entry(pages->prev, struct page, lru);
+
+ trace_f2fs_readpages(inode, page, nr_pages);
/* If the file has inline data, skip readpages */
if (f2fs_has_inline_data(inode))
@@ -1041,6 +1063,11 @@ int do_write_data_page(struct f2fs_io_info *fio)
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+
+ /* wait for GCed encrypted page writeback */
+ f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
+ fio->blk_addr);
+
fio->encrypted_page = f2fs_encrypt(inode, fio->page);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
@@ -1429,6 +1456,10 @@ put_next:
f2fs_wait_on_page_writeback(page, DATA);
+ /* wait for GCed encrypted page writeback */
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+
if (len == PAGE_CACHE_SIZE)
goto out_update;
if (PageUptodate(page))
@@ -1551,10 +1582,16 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
- if (iov_iter_rw(iter) == WRITE)
+ if (iov_iter_rw(iter) == WRITE) {
__allocate_data_blocks(inode, offset, count);
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ err = -EIO;
+ goto out;
+ }
+ }
err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
+out:
if (err < 0 && iov_iter_rw(iter) == WRITE)
f2fs_write_failed(mapping, offset + count);
@@ -1636,12 +1673,13 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
- /* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- int err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
+ if (f2fs_has_inline_data(inode))
+ return 0;
+
+ /* make sure allocating whole blocks */
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ filemap_write_and_wait(mapping);
+
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
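
Several hunks above cast the page index to loff_t before shifting when updating i_size, because (index + 1) << PAGE_CACHE_SHIFT is otherwise evaluated in the width of pgoff_t and wraps past 4 GB on 32-bit kernels. A minimal illustration, assuming 4 KB pages and a 32-bit unsigned long:

#include <linux/pagemap.h>

static loff_t index_to_i_size(pgoff_t index)
{
        /* without the cast, index 0x100000 (the 4 GB boundary) is shifted in
         * 32-bit arithmetic and wraps to 0x1000 instead of 0x100001000 */
        return (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
}
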
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index d013d8479753..478e5d54154f 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -33,11 +33,11 @@ static void update_general_status(struct f2fs_sb_info *sbi)
int i;
/* validation check of the segment numbers */
- si->hit_largest = atomic_read(&sbi->read_hit_largest);
- si->hit_cached = atomic_read(&sbi->read_hit_cached);
- si->hit_rbtree = atomic_read(&sbi->read_hit_rbtree);
+ si->hit_largest = atomic64_read(&sbi->read_hit_largest);
+ si->hit_cached = atomic64_read(&sbi->read_hit_cached);
+ si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
- si->total_ext = atomic_read(&sbi->total_hit_ext);
+ si->total_ext = atomic64_read(&sbi->total_hit_ext);
si->ext_tree = sbi->total_ext_tree;
si->ext_node = atomic_read(&sbi->total_ext_node);
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -118,7 +118,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
}
}
dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
- si->bimodal = div_u64(bimodal, dist);
+ si->bimodal = div64_u64(bimodal, dist);
if (si->dirty_count)
si->avg_vblocks = div_u64(total_vblocks, ndirty);
else
@@ -198,9 +198,9 @@ get_cache:
si->page_mem = 0;
npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
}
static int stat_show(struct seq_file *s, void *v)
@@ -283,12 +283,12 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
si->bg_node_blks);
seq_puts(s, "\nExtent Cache:\n");
- seq_printf(s, " - Hit Count: L1-1:%d L1-2:%d L2:%d\n",
+ seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
si->hit_largest, si->hit_cached,
si->hit_rbtree);
- seq_printf(s, " - Hit Ratio: %d%% (%d / %d)\n",
+ seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n",
!si->total_ext ? 0 :
- (si->hit_total * 100) / si->total_ext,
+ div64_u64(si->hit_total * 100, si->total_ext),
si->hit_total, si->total_ext);
seq_printf(s, " - Inner Struct Count: tree: %d, node: %d\n",
si->ext_tree, si->ext_node);
@@ -333,13 +333,13 @@ static int stat_show(struct seq_file *s, void *v)
/* memory footprint */
update_mem_info(si->sbi);
- seq_printf(s, "\nMemory: %u KB\n",
+ seq_printf(s, "\nMemory: %llu KB\n",
(si->base_mem + si->cache_mem + si->page_mem) >> 10);
- seq_printf(s, " - static: %u KB\n",
+ seq_printf(s, " - static: %llu KB\n",
si->base_mem >> 10);
- seq_printf(s, " - cached: %u KB\n",
+ seq_printf(s, " - cached: %llu KB\n",
si->cache_mem >> 10);
- seq_printf(s, " - paged : %u KB\n",
+ seq_printf(s, " - paged : %llu KB\n",
si->page_mem >> 10);
}
mutex_unlock(&f2fs_stat_mutex);
@@ -378,10 +378,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
si->sbi = sbi;
sbi->stat_info = si;
- atomic_set(&sbi->total_hit_ext, 0);
- atomic_set(&sbi->read_hit_rbtree, 0);
- atomic_set(&sbi->read_hit_largest, 0);
- atomic_set(&sbi->read_hit_cached, 0);
+ atomic64_set(&sbi->total_hit_ext, 0);
+ atomic64_set(&sbi->read_hit_rbtree, 0);
+ atomic64_set(&sbi->read_hit_largest, 0);
+ atomic64_set(&sbi->read_hit_cached, 0);
atomic_set(&sbi->inline_xattr, 0);
atomic_set(&sbi->inline_inode, 0);
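
The extent-cache statistics move from atomic_t to atomic64_t and the hit-ratio arithmetic to div64_u64(), so the counters neither overflow after long uptimes nor require a plain 64-by-64 division that 32-bit kernels cannot do natively. A small sketch, with hypothetical counters:

#include <linux/atomic.h>
#include <linux/math64.h>

static atomic64_t hits, lookups;

static u64 hit_ratio_percent(void)
{
        u64 h = atomic64_read(&hits);
        u64 t = atomic64_read(&lookups);

        /* div64_u64() handles u64 / u64 portably, including on 32-bit */
        return t ? div64_u64(h * 100, t) : 0;
}
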
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 8f15fc134040..7c1678ba8f92 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -258,7 +258,7 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
if (f2fs_has_inline_dentry(dir))
return f2fs_parent_inline_dir(dir, p);
- page = get_lock_data_page(dir, 0);
+ page = get_lock_data_page(dir, 0, false);
if (IS_ERR(page))
return NULL;
@@ -740,7 +740,7 @@ bool f2fs_empty_dir(struct inode *dir)
return f2fs_empty_inline_dir(dir);
for (bidx = 0; bidx < nblock; bidx++) {
- dentry_page = get_lock_data_page(dir, bidx);
+ dentry_page = get_lock_data_page(dir, bidx, false);
if (IS_ERR(dentry_page)) {
if (PTR_ERR(dentry_page) == -ENOENT)
continue;
@@ -787,7 +787,6 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
else
d_type = DT_UNKNOWN;
- /* encrypted case */
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
@@ -795,12 +794,20 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
int save_len = fstr->len;
int ret;
+ de_name.name = kmalloc(de_name.len, GFP_NOFS);
+ if (!de_name.name)
+ return false;
+
+ memcpy(de_name.name, d->filename[bit_pos], de_name.len);
+
ret = f2fs_fname_disk_to_usr(d->inode, &de->hash_code,
&de_name, fstr);
- de_name = *fstr;
- fstr->len = save_len;
+ kfree(de_name.name);
if (ret < 0)
return true;
+
+ de_name = *fstr;
+ fstr->len = save_len;
}
if (!dir_emit(ctx, de_name.name, de_name.len,
@@ -847,7 +854,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
for (; n < npages; n++) {
- dentry_page = get_lock_data_page(inode, n);
+ dentry_page = get_lock_data_page(inode, n, false);
if (IS_ERR(dentry_page))
continue;
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 997ac86f2a1d..7ddba812e11b 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -155,11 +155,12 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
return count - et->count;
}
-static void __drop_largest_extent(struct inode *inode, pgoff_t fofs)
+static void __drop_largest_extent(struct inode *inode,
+ pgoff_t fofs, unsigned int len)
{
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
- if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
+ if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs)
largest->len = 0;
}
@@ -168,7 +169,7 @@ void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
if (!f2fs_may_extent_tree(inode))
return;
- __drop_largest_extent(inode, fofs);
+ __drop_largest_extent(inode, fofs, 1);
}
void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
@@ -350,8 +351,7 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
}
if (en) {
- if (en->ei.len > et->largest.len)
- et->largest = en->ei;
+ __try_update_largest_extent(et, en);
et->cached_en = en;
}
return en;
@@ -388,18 +388,17 @@ do_insert:
if (!en)
return NULL;
- if (en->ei.len > et->largest.len)
- et->largest = en->ei;
+ __try_update_largest_extent(et, en);
et->cached_en = en;
return en;
}
-unsigned int f2fs_update_extent_tree_range(struct inode *inode,
+static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
pgoff_t fofs, block_t blkaddr, unsigned int len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
- struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
+ struct extent_node *en = NULL, *en1 = NULL;
struct extent_node *prev_en = NULL, *next_en = NULL;
struct extent_info ei, dei, prev;
struct rb_node **insert_p = NULL, *insert_parent = NULL;
@@ -409,6 +408,8 @@ unsigned int f2fs_update_extent_tree_range(struct inode *inode,
if (!et)
return false;
+ trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
+
write_lock(&et->lock);
if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
@@ -419,148 +420,99 @@ unsigned int f2fs_update_extent_tree_range(struct inode *inode,
prev = et->largest;
dei.len = 0;
- /* we do not guarantee that the largest extent is cached all the time */
- __drop_largest_extent(inode, fofs);
+ /*
+ * drop largest extent before lookup, in case it's already
+ * been shrunk from extent tree
+ */
+ __drop_largest_extent(inode, fofs, len);
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
&insert_p, &insert_parent);
- if (!en) {
- if (next_en) {
- en = next_en;
- f2fs_bug_on(sbi, en->ei.fofs <= pos);
- pos = en->ei.fofs;
- } else {
- /*
- * skip searching in the tree since there is no
- * larger extent node in the cache.
- */
- goto update_extent;
- }
- }
+ if (!en)
+ en = next_en;
/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
- while (en) {
- struct rb_node *node;
+ while (en && en->ei.fofs < end) {
+ unsigned int org_end;
+ int parts = 0; /* # of parts current extent split into */
- if (pos >= end)
- break;
+ next_en = en1 = NULL;
dei = en->ei;
- en1 = en2 = NULL;
+ org_end = dei.fofs + dei.len;
+ f2fs_bug_on(sbi, pos >= org_end);
- node = rb_next(&en->rb_node);
+ if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+ en->ei.len = pos - en->ei.fofs;
+ prev_en = en;
+ parts = 1;
+ }
- /*
- * 2.1 there are four cases when we invalidate blkaddr in extent
- * node, |V: valid address, X: will be invalidated|
- */
- /* case#1, invalidate right part of extent node |VVVVVXXXXX| */
- if (pos > dei.fofs && end >= dei.fofs + dei.len) {
- en->ei.len = pos - dei.fofs;
-
- if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
- __detach_extent_node(sbi, et, en);
- insert_p = NULL;
- insert_parent = NULL;
- goto update;
+ if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
+ if (parts) {
+ set_extent_info(&ei, end,
+ end - dei.fofs + dei.blk,
+ org_end - end);
+ en1 = __insert_extent_tree(sbi, et, &ei,
+ NULL, NULL);
+ next_en = en1;
+ } else {
+ en->ei.fofs = end;
+ en->ei.blk += end - dei.fofs;
+ en->ei.len -= end - dei.fofs;
+ next_en = en;
}
-
- if (__is_extent_same(&dei, &et->largest))
- et->largest = en->ei;
- goto next;
+ parts++;
}
- /* case#2, invalidate left part of extent node |XXXXXVVVVV| */
- if (pos <= dei.fofs && end < dei.fofs + dei.len) {
- en->ei.fofs = end;
- en->ei.blk += end - dei.fofs;
- en->ei.len -= end - dei.fofs;
-
- if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
- __detach_extent_node(sbi, et, en);
- insert_p = NULL;
- insert_parent = NULL;
- goto update;
- }
+ if (!next_en) {
+ struct rb_node *node = rb_next(&en->rb_node);
- if (__is_extent_same(&dei, &et->largest))
- et->largest = en->ei;
- goto next;
+ next_en = node ?
+ rb_entry(node, struct extent_node, rb_node)
+ : NULL;
}
- __detach_extent_node(sbi, et, en);
+ if (parts)
+ __try_update_largest_extent(et, en);
+ else
+ __detach_extent_node(sbi, et, en);
/*
- * if we remove node in rb-tree, our parent node pointer may
- * point the wrong place, discard them.
+ * if the original extent is split into zero or two parts, the extent
+ * tree has been altered by deletion or insertion; therefore,
+ * invalidate the pointers related to the tree.
*/
- insert_p = NULL;
- insert_parent = NULL;
-
- /* case#3, invalidate entire extent node |XXXXXXXXXX| */
- if (pos <= dei.fofs && end >= dei.fofs + dei.len) {
- if (__is_extent_same(&dei, &et->largest))
- et->largest.len = 0;
- goto update;
+ if (parts != 1) {
+ insert_p = NULL;
+ insert_parent = NULL;
}
- /*
- * case#4, invalidate data in the middle of extent node
- * |VVVXXXXVVV|
- */
- if (dei.len > F2FS_MIN_EXTENT_LEN) {
- unsigned int endofs;
-
- /* insert left part of split extent into cache */
- if (pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
- set_extent_info(&ei, dei.fofs, dei.blk,
- pos - dei.fofs);
- en1 = __insert_extent_tree(sbi, et, &ei,
- NULL, NULL);
- }
-
- /* insert right part of split extent into cache */
- endofs = dei.fofs + dei.len;
- if (endofs - end >= F2FS_MIN_EXTENT_LEN) {
- set_extent_info(&ei, end,
- end - dei.fofs + dei.blk,
- endofs - end);
- en2 = __insert_extent_tree(sbi, et, &ei,
- NULL, NULL);
- }
- }
-update:
- /* 2.2 update in global extent list */
+ /* update in global extent list */
spin_lock(&sbi->extent_lock);
- if (en && !list_empty(&en->list))
+ if (!parts && !list_empty(&en->list))
list_del(&en->list);
if (en1)
list_add_tail(&en1->list, &sbi->extent_list);
- if (en2)
- list_add_tail(&en2->list, &sbi->extent_list);
spin_unlock(&sbi->extent_lock);
- /* 2.3 release extent node */
- if (en)
+ /* release extent node */
+ if (!parts)
kmem_cache_free(extent_node_slab, en);
-next:
- en = node ? rb_entry(node, struct extent_node, rb_node) : NULL;
- next_en = en;
- if (en)
- pos = en->ei.fofs;
+
+ en = next_en;
}
-update_extent:
/* 3. update extent in extent cache */
if (blkaddr) {
struct extent_node *den = NULL;
set_extent_info(&ei, fofs, blkaddr, len);
- en3 = __try_merge_extent_node(sbi, et, &ei, &den,
+ en1 = __try_merge_extent_node(sbi, et, &ei, &den,
prev_en, next_en);
- if (!en3)
- en3 = __insert_extent_tree(sbi, et, &ei,
+ if (!en1)
+ en1 = __insert_extent_tree(sbi, et, &ei,
insert_p, insert_parent);
/* give up extent_cache, if split and small updates happen */
@@ -572,11 +524,11 @@ update_extent:
}
spin_lock(&sbi->extent_lock);
- if (en3) {
- if (list_empty(&en3->list))
- list_add_tail(&en3->list, &sbi->extent_list);
+ if (en1) {
+ if (list_empty(&en1->list))
+ list_add_tail(&en1->list, &sbi->extent_list);
else
- list_move_tail(&en3->list, &sbi->extent_list);
+ list_move_tail(&en1->list, &sbi->extent_list);
}
if (den && !list_empty(&den->list))
list_del(&den->list);
@@ -650,6 +602,11 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
}
spin_unlock(&sbi->extent_lock);
+ /*
+ * reset ino to search for victims from the beginning of the global extent tree.
+ */
+ ino = F2FS_ROOT_INO(sbi);
+
while ((found = radix_tree_gang_lookup(root,
(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
unsigned i;
@@ -663,7 +620,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
write_unlock(&et->lock);
if (node_cnt + tree_cnt >= nr_shrink)
- break;
+ goto unlock_out;
}
}
unlock_out:
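
The rewritten __drop_largest_extent() above clears the cached largest extent whenever the updated range [fofs, fofs + len) overlaps it at all, not only when fofs falls inside it. The new condition is the standard half-open interval overlap test; a minimal standalone sketch of the same predicate (the helper name is illustrative, not part of the patch):

static bool ranges_overlap(unsigned int a_start, unsigned int a_len,
				unsigned int b_start, unsigned int b_len)
{
	/* two half-open ranges overlap iff each one starts before the other ends */
	return a_start < b_start + b_len && b_start < a_start + a_len;
}

With a_start/a_len read as fofs/len and b_start/b_len as largest->fofs/largest->len, this is exactly the check added above.
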
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index f1a90ffd7cad..9db5500d63d9 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -19,6 +19,7 @@
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#include <linux/bio.h>
#ifdef CONFIG_F2FS_CHECK_FS
@@ -52,6 +53,7 @@
#define F2FS_MOUNT_NOBARRIER 0x00000800
#define F2FS_MOUNT_FASTBOOT 0x00001000
#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
+#define F2FS_MOUNT_FORCE_FG_GC 0x00004000
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -122,6 +124,7 @@ enum {
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
+#define DEF_CP_INTERVAL 60 /* 60 secs */
struct cp_control {
int reason;
@@ -230,6 +233,7 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6)
+#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_SET_ENCRYPTION_POLICY \
_IOR('f', 19, struct f2fs_encryption_policy)
@@ -246,6 +250,7 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */
+#define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -492,12 +497,20 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
return __is_extent_mergeable(cur, front);
}
+static inline void __try_update_largest_extent(struct extent_tree *et,
+ struct extent_node *en)
+{
+ if (en->ei.len > et->largest.len)
+ et->largest = en->ei;
+}
+
struct f2fs_nm_info {
block_t nat_blkaddr; /* base disk address of NAT */
nid_t max_nid; /* maximum possible node ids */
nid_t available_nids; /* maximum available node ids */
nid_t next_scan_nid; /* the next nid to be scanned */
unsigned int ram_thresh; /* control the memory footprint */
+ unsigned int ra_nid_pages; /* # of nid pages to read ahead */
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
@@ -724,6 +737,7 @@ struct f2fs_sb_info {
struct rw_semaphore node_write; /* locking node writes */
struct mutex writepages; /* mutex for writepages() */
wait_queue_head_t cp_wait;
+ long cp_expires, cp_interval; /* next expected periodic cp */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
@@ -787,10 +801,10 @@ struct f2fs_sb_info {
unsigned int segment_count[2]; /* # of allocated segments */
unsigned int block_count[2]; /* # of allocated blocks */
atomic_t inplace_count; /* # of inplace update */
- atomic_t total_hit_ext; /* # of lookup extent cache */
- atomic_t read_hit_rbtree; /* # of hit rbtree extent node */
- atomic_t read_hit_largest; /* # of hit largest extent node */
- atomic_t read_hit_cached; /* # of hit cached extent node */
+ atomic64_t total_hit_ext; /* # of lookup extent cache */
+ atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */
+ atomic64_t read_hit_largest; /* # of hit largest extent node */
+ atomic64_t read_hit_cached; /* # of hit cached extent node */
atomic_t inline_xattr; /* # of inline_xattr inodes */
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
@@ -1220,6 +1234,24 @@ static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
return sbi->total_valid_inode_count;
}
+static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
+ pgoff_t index, bool for_write)
+{
+ if (!for_write)
+ return grab_cache_page(mapping, index);
+ return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
+}
+
+static inline void f2fs_copy_page(struct page *src, struct page *dst)
+{
+ char *src_kaddr = kmap(src);
+ char *dst_kaddr = kmap(dst);
+
+ memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
+ kunmap(dst);
+ kunmap(src);
+}
+
static inline void f2fs_put_page(struct page *page, int unlock)
{
if (!page)
@@ -1579,6 +1611,26 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
return S_ISREG(mode);
}
+static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kmalloc(size, flags | __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(size, flags, PAGE_KERNEL);
+ return ret;
+}
+
+static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kzalloc(size, flags | __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+ return ret;
+}
+
#define get_inode_mode(i) \
((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -1721,6 +1773,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *);
int create_flush_cmd_control(struct f2fs_sb_info *);
void destroy_flush_cmd_control(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
+bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *);
@@ -1739,6 +1792,7 @@ void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
void allocate_data_block(struct f2fs_sb_info *, struct page *,
block_t, block_t *, struct f2fs_summary *, int);
void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
@@ -1754,8 +1808,9 @@ void destroy_segment_manager_caches(void);
*/
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
+struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
-int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int);
+int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
@@ -1787,9 +1842,9 @@ void set_data_blkaddr(struct dnode_of_data *);
int reserve_new_block(struct dnode_of_data *);
int f2fs_get_block(struct dnode_of_data *, pgoff_t);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-struct page *get_read_data_page(struct inode *, pgoff_t, int);
+struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
struct page *find_data_page(struct inode *, pgoff_t);
-struct page *get_lock_data_page(struct inode *, pgoff_t);
+struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct f2fs_io_info *);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
@@ -1802,7 +1857,7 @@ int f2fs_release_page(struct page *, gfp_t);
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
-int f2fs_gc(struct f2fs_sb_info *);
+int f2fs_gc(struct f2fs_sb_info *, bool);
void build_gc_manager(struct f2fs_sb_info *);
/*
@@ -1820,7 +1875,8 @@ struct f2fs_stat_info {
struct f2fs_sb_info *sbi;
int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
int main_area_segs, main_area_sections, main_area_zones;
- int hit_largest, hit_cached, hit_rbtree, hit_total, total_ext;
+ unsigned long long hit_largest, hit_cached, hit_rbtree;
+ unsigned long long hit_total, total_ext;
int ext_tree, ext_node;
int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
int nats, dirty_nats, sits, dirty_sits, fnids;
@@ -1844,7 +1900,7 @@ struct f2fs_stat_info {
unsigned int segment_count[2];
unsigned int block_count[2];
unsigned int inplace_count;
- unsigned base_mem, cache_mem, page_mem;
+ unsigned long long base_mem, cache_mem, page_mem;
};
static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
@@ -1857,10 +1913,10 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
#define stat_inc_dirty_dir(sbi) ((sbi)->n_dirty_dirs++)
#define stat_dec_dirty_dir(sbi) ((sbi)->n_dirty_dirs--)
-#define stat_inc_total_hit(sbi) (atomic_inc(&(sbi)->total_hit_ext))
-#define stat_inc_rbtree_node_hit(sbi) (atomic_inc(&(sbi)->read_hit_rbtree))
-#define stat_inc_largest_node_hit(sbi) (atomic_inc(&(sbi)->read_hit_largest))
-#define stat_inc_cached_node_hit(sbi) (atomic_inc(&(sbi)->read_hit_cached))
+#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
+#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
+#define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode) \
do { \
if (f2fs_has_inline_xattr(inode)) \
@@ -1998,6 +2054,8 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
bool f2fs_empty_inline_dir(struct inode *);
int f2fs_read_inline_dir(struct file *, struct dir_context *,
struct f2fs_str *);
+int f2fs_inline_data_fiemap(struct inode *,
+ struct fiemap_extent_info *, __u64, __u64);
/*
* shrinker.c
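
f2fs_kvmalloc()/f2fs_kvzalloc() introduced above try kmalloc() first (with __GFP_NOWARN so the fallback path stays quiet) and fall back to vmalloc() when the request is too large or memory is too fragmented for the slab path; the segment.c changes later in this patch free such buffers with kvfree(), which handles both cases. A minimal kernel-context fragment showing the intended calling convention (the wrapper names and bitmap_size are illustrative only):

static unsigned long *alloc_big_bitmap(unsigned int bitmap_size)
{
	/* kmalloc if it fits, otherwise fall back to vmalloc */
	return f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
}

static void free_big_bitmap(unsigned long *bitmap)
{
	kvfree(bitmap);	/* frees either a kmalloc'd or vmalloc'd buffer */
}
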
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 8120f8685141..a197215ad52b 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -74,7 +74,8 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
goto mapped;
/* page is wholly or partially inside EOF */
- if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
+ if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+ i_size_read(inode)) {
unsigned offset;
offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
@@ -86,6 +87,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
mapped:
/* fill the page */
f2fs_wait_on_page_writeback(page, DATA);
+
+ /* wait for GCed encrypted page writeback */
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+
/* if gced page is attached, don't write to cold segment */
clear_cold_data(page);
out:
@@ -343,7 +349,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
- for (; data_ofs < isize; data_ofs = pgofs << PAGE_CACHE_SHIFT) {
+ for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
if (err && err != -ENOENT) {
@@ -504,14 +510,14 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
if (cache_only) {
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (page && PageUptodate(page))
goto truncate_out;
f2fs_put_page(page, 1);
return 0;
}
- page = get_lock_data_page(inode, index);
+ page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
return 0;
truncate_out:
@@ -680,6 +686,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
* larger than i_size.
*/
truncate_setsize(inode, attr->ia_size);
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
}
}
@@ -738,23 +745,31 @@ static int fill_zero(struct inode *inode, pgoff_t index,
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
- pgoff_t index;
int err;
- for (index = pg_start; index < pg_end; index++) {
+ while (pg_start < pg_end) {
struct dnode_of_data dn;
+ pgoff_t end_offset, count;
set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
if (err) {
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ pg_start++;
continue;
+ }
return err;
}
- if (dn.data_blkaddr != NULL_ADDR)
- truncate_data_blocks_range(&dn, 1);
+ end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
+
+ f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
+
+ truncate_data_blocks_range(&dn, count);
f2fs_put_dnode(&dn);
+
+ pg_start += count;
}
return 0;
}
@@ -765,9 +780,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
loff_t off_start, off_end;
int ret = 0;
- if (!S_ISREG(inode->i_mode))
- return -EOPNOTSUPP;
-
if (f2fs_has_inline_data(inode)) {
ret = f2fs_convert_inline_inode(inode);
if (ret)
@@ -805,8 +817,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi);
- blk_start = pg_start << PAGE_CACHE_SHIFT;
- blk_end = pg_end << PAGE_CACHE_SHIFT;
+ blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
+ blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
@@ -819,86 +831,100 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int __exchange_data_block(struct inode *inode, pgoff_t src,
+ pgoff_t dst, bool full)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
- int ret = 0;
-
- for (; end < nrpages; start++, end++) {
- block_t new_addr, old_addr;
-
- f2fs_lock_op(sbi);
+ block_t new_addr;
+ bool do_replace = false;
+ int ret;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
- if (ret && ret != -ENOENT) {
- goto out;
- } else if (ret == -ENOENT) {
- new_addr = NULL_ADDR;
- } else {
- new_addr = dn.data_blkaddr;
- truncate_data_blocks_range(&dn, 1);
- f2fs_put_dnode(&dn);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
+ if (ret && ret != -ENOENT) {
+ return ret;
+ } else if (ret == -ENOENT) {
+ new_addr = NULL_ADDR;
+ } else {
+ new_addr = dn.data_blkaddr;
+ if (!is_checkpointed_data(sbi, new_addr)) {
+ dn.data_blkaddr = NULL_ADDR;
+ /* do not invalidate this block address */
+ set_data_blkaddr(&dn);
+ f2fs_update_extent_cache(&dn);
+ do_replace = true;
}
+ f2fs_put_dnode(&dn);
+ }
- if (new_addr == NULL_ADDR) {
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
- if (ret && ret != -ENOENT) {
- goto out;
- } else if (ret == -ENOENT) {
- f2fs_unlock_op(sbi);
- continue;
- }
+ if (new_addr == NULL_ADDR)
+ return full ? truncate_hole(inode, dst, dst + 1) : 0;
- if (dn.data_blkaddr == NULL_ADDR) {
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
- continue;
- } else {
- truncate_data_blocks_range(&dn, 1);
- }
+ if (do_replace) {
+ struct page *ipage = get_node_page(sbi, inode->i_ino);
+ struct node_info ni;
- f2fs_put_dnode(&dn);
- } else {
- struct page *ipage;
+ if (IS_ERR(ipage)) {
+ ret = PTR_ERR(ipage);
+ goto err_out;
+ }
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- goto out;
- }
+ set_new_dnode(&dn, inode, ipage, NULL, 0);
+ ret = f2fs_reserve_block(&dn, dst);
+ if (ret)
+ goto err_out;
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, start);
- if (ret)
- goto out;
+ truncate_data_blocks_range(&dn, 1);
- old_addr = dn.data_blkaddr;
- if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
- dn.data_blkaddr = NULL_ADDR;
- f2fs_update_extent_cache(&dn);
- invalidate_blocks(sbi, old_addr);
+ get_node_info(sbi, dn.nid, &ni);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
+ ni.version, true);
+ f2fs_put_dnode(&dn);
+ } else {
+ struct page *psrc, *pdst;
+
+ psrc = get_lock_data_page(inode, src, true);
+ if (IS_ERR(psrc))
+ return PTR_ERR(psrc);
+ pdst = get_new_data_page(inode, NULL, dst, false);
+ if (IS_ERR(pdst)) {
+ f2fs_put_page(psrc, 1);
+ return PTR_ERR(pdst);
+ }
+ f2fs_copy_page(psrc, pdst);
+ set_page_dirty(pdst);
+ f2fs_put_page(pdst, 1);
+ f2fs_put_page(psrc, 1);
- dn.data_blkaddr = new_addr;
- set_data_blkaddr(&dn);
- } else if (new_addr != NEW_ADDR) {
- struct node_info ni;
+ return truncate_hole(inode, src, src + 1);
+ }
+ return 0;
- get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, old_addr, new_addr,
- ni.version, true);
- }
+err_out:
+ if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
+ dn.data_blkaddr = new_addr;
+ set_data_blkaddr(&dn);
+ f2fs_update_extent_cache(&dn);
+ f2fs_put_dnode(&dn);
+ }
+ return ret;
+}
- f2fs_put_dnode(&dn);
- }
+static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ int ret = 0;
+
+ for (; end < nrpages; start++, end++) {
+ f2fs_balance_fs(sbi);
+ f2fs_lock_op(sbi);
+ ret = __exchange_data_block(inode, end, start, true);
f2fs_unlock_op(sbi);
+ if (ret)
+ break;
}
- return 0;
-out:
- f2fs_unlock_op(sbi);
return ret;
}
@@ -908,9 +934,6 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
loff_t new_size;
int ret;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
if (offset + len >= i_size_read(inode))
return -EINVAL;
@@ -940,7 +963,12 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
+ /* write out all moved pages, if possible */
+ filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+ truncate_pagecache(inode, offset);
+
new_size = i_size_read(inode) - len;
+ truncate_pagecache(inode, new_size);
ret = truncate_blocks(inode, new_size, true);
if (!ret)
@@ -959,9 +987,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
loff_t off_start, off_end;
int ret = 0;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
ret = inode_newsize_ok(inode, (len + offset));
if (ret)
return ret;
@@ -1003,7 +1028,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
return ret;
new_size = max_t(loff_t, new_size,
- pg_start << PAGE_CACHE_SHIFT);
+ (loff_t)pg_start << PAGE_CACHE_SHIFT);
}
for (index = pg_start; index < pg_end; index++) {
@@ -1039,7 +1064,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
f2fs_unlock_op(sbi);
new_size = max_t(loff_t, new_size,
- (index + 1) << PAGE_CACHE_SHIFT);
+ (loff_t)(index + 1) << PAGE_CACHE_SHIFT);
}
if (off_end) {
@@ -1066,10 +1091,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t pg_start, pg_end, delta, nrpages, idx;
loff_t new_size;
- int ret;
-
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
+ int ret = 0;
new_size = i_size_read(inode) + len;
if (new_size > inode->i_sb->s_maxbytes)
@@ -1107,57 +1129,19 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
- struct dnode_of_data dn;
- struct page *ipage;
- block_t new_addr, old_addr;
-
f2fs_lock_op(sbi);
-
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, idx, LOOKUP_NODE_RA);
- if (ret && ret != -ENOENT) {
- goto out;
- } else if (ret == -ENOENT) {
- goto next;
- } else if (dn.data_blkaddr == NULL_ADDR) {
- f2fs_put_dnode(&dn);
- goto next;
- } else {
- new_addr = dn.data_blkaddr;
- truncate_data_blocks_range(&dn, 1);
- f2fs_put_dnode(&dn);
- }
-
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- goto out;
- }
-
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, idx + delta);
- if (ret)
- goto out;
-
- old_addr = dn.data_blkaddr;
- f2fs_bug_on(sbi, old_addr != NEW_ADDR);
-
- if (new_addr != NEW_ADDR) {
- struct node_info ni;
-
- get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, old_addr, new_addr,
- ni.version, true);
- }
- f2fs_put_dnode(&dn);
-next:
+ ret = __exchange_data_block(inode, idx, idx + delta, false);
f2fs_unlock_op(sbi);
+ if (ret)
+ break;
}
- i_size_write(inode, new_size);
- return 0;
-out:
- f2fs_unlock_op(sbi);
+ /* write out all moved pages, if possible */
+ filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+ truncate_pagecache(inode, offset);
+
+ if (!ret)
+ i_size_write(inode, new_size);
return ret;
}
@@ -1204,9 +1188,10 @@ noalloc:
if (pg_start == pg_end)
new_size = offset + len;
else if (index == pg_start && off_start)
- new_size = (index + 1) << PAGE_CACHE_SHIFT;
+ new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
else if (index == pg_end)
- new_size = (index << PAGE_CACHE_SHIFT) + off_end;
+ new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+ off_end;
else
new_size += PAGE_CACHE_SIZE;
}
@@ -1228,6 +1213,10 @@ static long f2fs_fallocate(struct file *file, int mode,
struct inode *inode = file_inode(file);
long ret = 0;
+ /* f2fs only supports ->fallocate for regular files */
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
if (f2fs_encrypted_inode(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
@@ -1437,8 +1426,7 @@ static int f2fs_ioc_release_volatile_write(struct file *filp)
if (!f2fs_is_first_block_written(inode))
return truncate_partial_data_page(inode, 0, true);
- punch_hole(inode, 0, F2FS_BLKSIZE);
- return 0;
+ return punch_hole(inode, 0, F2FS_BLKSIZE);
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
@@ -1455,13 +1443,9 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
f2fs_balance_fs(F2FS_I_SB(inode));
- if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- commit_inmem_pages(inode, true);
- }
-
- if (f2fs_is_volatile_file(inode))
- clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+ clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+ commit_inmem_pages(inode, true);
mnt_drop_write_file(filp);
return ret;
@@ -1496,6 +1480,10 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi);
break;
+ case F2FS_GOING_DOWN_METAFLUSH:
+ sync_meta_pages(sbi, META, LONG_MAX);
+ f2fs_stop_checkpoint(sbi);
+ break;
default:
return -EINVAL;
}
@@ -1616,27 +1604,44 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- __u32 i, count;
+ __u32 sync;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (get_user(count, (__u32 __user *)arg))
+ if (get_user(sync, (__u32 __user *)arg))
return -EFAULT;
- if (!count || count > F2FS_BATCH_GC_MAX_NUM)
- return -EINVAL;
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
- for (i = 0; i < count; i++) {
+ if (!sync) {
if (!mutex_trylock(&sbi->gc_mutex))
- break;
-
- if (f2fs_gc(sbi))
- break;
+ return -EBUSY;
+ } else {
+ mutex_lock(&sbi->gc_mutex);
}
- if (put_user(i, (__u32 __user *)arg))
- return -EFAULT;
+ return f2fs_gc(sbi, sync);
+}
+
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct cp_control cpc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ cpc.reason = __get_cp_reason(sbi);
+
+ mutex_lock(&sbi->gc_mutex);
+ write_checkpoint(sbi, &cpc);
+ mutex_unlock(&sbi->gc_mutex);
return 0;
}
@@ -1672,6 +1677,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_get_encryption_pwsalt(filp, arg);
case F2FS_IOC_GARBAGE_COLLECT:
return f2fs_ioc_gc(filp, arg);
+ case F2FS_IOC_WRITE_CHECKPOINT:
+ return f2fs_ioc_write_checkpoint(filp, arg);
default:
return -ENOTTY;
}
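
With this change F2FS_IOC_GARBAGE_COLLECT takes a __u32 sync flag instead of a batch count, and the new F2FS_IOC_WRITE_CHECKPOINT triggers an explicit checkpoint; both require CAP_SYS_ADMIN and a writable filesystem. A minimal userspace sketch, assuming F2FS_IOCTL_MAGIC is 0xf5 as defined in fs/f2fs/f2fs.h (the helper name is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>

#define F2FS_IOC_GARBAGE_COLLECT	_IO(0xf5, 6)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(0xf5, 7)

static int f2fs_force_cleanup(int fd)
{
	uint32_t sync = 1;	/* 1: wait for a freed section (FG_GC), 0: best effort */

	/* sync mode fails with EAGAIN if no section could be freed */
	if (ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync) < 0)
		return -1;
	return ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0);
}
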
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 782b8e72c094..fedbf67a0842 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,9 +78,12 @@ static int gc_thread_func(void *data)
stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi))
+ if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
wait_ms = gc_th->no_gc_sleep_time;
+ trace_f2fs_background_gc(sbi->sb, wait_ms,
+ prefree_segments(sbi), free_segments(sbi));
+
/* balancing f2fs's metadata periodically */
f2fs_balance_fs_bg(sbi);
@@ -257,6 +260,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
unsigned int secno, max_cost;
+ unsigned int last_segment = MAIN_SEGS(sbi);
int nsearched = 0;
mutex_lock(&dirty_i->seglist_lock);
@@ -267,6 +271,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
p.min_segno = NULL_SEGNO;
p.min_cost = max_cost = get_max_cost(sbi, &p);
+ if (p.max_search == 0)
+ goto out;
+
if (p.alloc_mode == LFS && gc_type == FG_GC) {
p.min_segno = check_bg_victims(sbi);
if (p.min_segno != NULL_SEGNO)
@@ -277,9 +284,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
unsigned long cost;
unsigned int segno;
- segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
- if (segno >= MAIN_SEGS(sbi)) {
+ segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
+ if (segno >= last_segment) {
if (sbi->last_victim[p.gc_mode]) {
+ last_segment = sbi->last_victim[p.gc_mode];
sbi->last_victim[p.gc_mode] = 0;
p.offset = 0;
continue;
@@ -327,6 +335,7 @@ got_it:
sbi->cur_victim_sec,
prefree_segments(sbi), free_segments(sbi));
}
+out:
mutex_unlock(&dirty_i->seglist_lock);
return (p.min_segno == NULL_SEGNO) ? 0 : 1;
@@ -541,7 +550,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
int err;
/* do not read out */
- page = grab_cache_page(inode->i_mapping, bidx);
+ page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
if (!page)
return;
@@ -550,8 +559,16 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
if (err)
goto out;
- if (unlikely(dn.data_blkaddr == NULL_ADDR))
+ if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
+ ClearPageUptodate(page);
goto put_out;
+ }
+
+ /*
+ * don't cache encrypted data into the meta inode until the previously
+ * dirtied data has been written back, to avoid racing between GC and flush.
+ */
+ f2fs_wait_on_page_writeback(page, DATA);
get_node_info(fio.sbi, dn.nid, &ni);
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
@@ -580,7 +597,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
goto put_page_out;
set_page_dirty(fio.encrypted_page);
- f2fs_wait_on_page_writeback(fio.encrypted_page, META);
+ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -611,7 +628,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
struct page *page;
- page = get_lock_data_page(inode, bidx);
+ page = get_lock_data_page(inode, bidx, true);
if (IS_ERR(page))
return;
@@ -705,7 +722,7 @@ next_step:
start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
data_page = get_read_data_page(inode,
- start_bidx + ofs_in_node, READA);
+ start_bidx + ofs_in_node, READA, true);
if (IS_ERR(data_page)) {
iput(inode);
continue;
@@ -797,13 +814,12 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
return nfree;
}
-int f2fs_gc(struct f2fs_sb_info *sbi)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
- unsigned int segno = NULL_SEGNO;
- unsigned int i;
- int gc_type = BG_GC;
- int nfree = 0;
- int ret = -1;
+ unsigned int segno, i;
+ int gc_type = sync ? FG_GC : BG_GC;
+ int sec_freed = 0;
+ int ret = -EINVAL;
struct cp_control cpc;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
@@ -812,12 +828,14 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
cpc.reason = __get_cp_reason(sbi);
gc_more:
+ segno = NULL_SEGNO;
+
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
goto stop;
if (unlikely(f2fs_cp_error(sbi)))
goto stop;
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
gc_type = FG_GC;
if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
write_checkpoint(sbi, &cpc);
@@ -830,23 +848,38 @@ gc_more:
/* readahead multi ssa blocks those have contiguous address */
if (sbi->segs_per_sec > 1)
ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
- META_SSA);
+ META_SSA, true);
- for (i = 0; i < sbi->segs_per_sec; i++)
- nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
+ for (i = 0; i < sbi->segs_per_sec; i++) {
+ /*
+ * in the FG_GC case, stop GCing the remaining segments once one segment
+ * in the selected section fails, to avoid long latency.
+ */
+ if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
+ gc_type == FG_GC)
+ break;
+ }
+
+ if (i == sbi->segs_per_sec && gc_type == FG_GC)
+ sec_freed++;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
- if (has_not_enough_free_secs(sbi, nfree))
- goto gc_more;
+ if (!sync) {
+ if (has_not_enough_free_secs(sbi, sec_freed))
+ goto gc_more;
- if (gc_type == FG_GC)
- write_checkpoint(sbi, &cpc);
+ if (gc_type == FG_GC)
+ write_checkpoint(sbi, &cpc);
+ }
stop:
mutex_unlock(&sbi->gc_mutex);
put_gc_inode(&gc_list);
+
+ if (sync)
+ ret = sec_freed ? 0 : -EAGAIN;
return ret;
}
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index c5a055b3376e..b4a65be9f7d3 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -19,12 +19,6 @@
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
-/*
- * with this macro, we can control the max time we do garbage collection,
- * when user triggers batch mode gc by ioctl.
- */
-#define F2FS_BATCH_GC_MAX_NUM 16
-
/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 3d143be42895..bda7126466c0 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -12,6 +12,7 @@
#include <linux/f2fs_fs.h>
#include "f2fs.h"
+#include "node.h"
bool f2fs_may_inline_data(struct inode *inode)
{
@@ -274,12 +275,14 @@ process_inline:
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- truncate_inline_inode(ipage, 0);
+ if (!truncate_inline_inode(ipage, 0))
+ return false;
f2fs_clear_inline_inode(inode);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
- truncate_blocks(inode, 0, false);
+ if (truncate_blocks(inode, 0, false))
+ return false;
goto process_inline;
}
return false;
@@ -568,3 +571,38 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
f2fs_put_page(ipage, 1);
return 0;
}
+
+int f2fs_inline_data_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
+{
+ __u64 byteaddr, ilen;
+ __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
+ FIEMAP_EXTENT_LAST;
+ struct node_info ni;
+ struct page *ipage;
+ int err = 0;
+
+ ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
+
+ if (!f2fs_has_inline_data(inode)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
+ if (start >= ilen)
+ goto out;
+ if (start + len < ilen)
+ ilen = start + len;
+ ilen -= start;
+
+ get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
+ byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
+ err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
+out:
+ f2fs_put_page(ipage, 1);
+ return err;
+}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 35aae65b3e5d..97e20decacb4 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -296,16 +296,12 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
return 0;
/*
- * We need to lock here to prevent from producing dirty node pages
+ * We need to balance the fs here to prevent producing dirty node pages
* during the urgent cleaning time when running out of free sections.
*/
- f2fs_lock_op(sbi);
update_inode_page(inode);
- f2fs_unlock_op(sbi);
-
- if (wbc)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi);
return 0;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a680bf38e4f0..e48b80c49090 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -410,11 +410,14 @@ err_out:
* If the symlink path is stored into inline_data, there is no
* performance regression.
*/
- if (!err)
+ if (!err) {
filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);
- if (IS_DIRSYNC(dir))
- f2fs_sync_fs(sbi->sb, 1);
+ if (IS_DIRSYNC(dir))
+ f2fs_sync_fs(sbi->sb, 1);
+ } else {
+ f2fs_unlink(dir, dentry);
+ }
kfree(sd);
f2fs_fname_crypto_free_buffer(&disk_link);
@@ -947,8 +950,13 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook
/* Symlink is encrypted */
sd = (struct f2fs_encrypted_symlink_data *)caddr;
- cstr.name = sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
+ cstr.name = kmalloc(cstr.len, GFP_NOFS);
+ if (!cstr.name) {
+ res = -ENOMEM;
+ goto errout;
+ }
+ memcpy(cstr.name, sd->encrypted_path, cstr.len);
/* this is broken symlink case */
if (cstr.name[0] == 0 && cstr.len == 0) {
@@ -970,6 +978,8 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook
if (res < 0)
goto errout;
+ kfree(cstr.name);
+
paddr = pstr.name;
/* Null-terminate the name */
@@ -979,6 +989,7 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook
page_cache_release(cpage);
return *cookie = paddr;
errout:
+ kfree(cstr.name);
f2fs_fname_crypto_free_buffer(&pstr);
kunmap(cpage);
page_cache_release(cpage);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 27d1a74dd6f3..7bcbc6e9c40d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1323,23 +1323,24 @@ static int f2fs_write_node_page(struct page *page,
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
+ if (wbc->for_reclaim) {
+ if (!down_read_trylock(&sbi->node_write))
+ goto redirty_out;
+ } else {
+ down_read(&sbi->node_write);
+ }
+
get_node_info(sbi, nid, &ni);
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_NODES);
+ up_read(&sbi->node_write);
unlock_page(page);
return 0;
}
- if (wbc->for_reclaim) {
- if (!down_read_trylock(&sbi->node_write))
- goto redirty_out;
- } else {
- down_read(&sbi->node_write);
- }
-
set_page_writeback(page);
fio.blk_addr = ni.blk_addr;
write_node_page(nid, &fio);
@@ -1528,7 +1529,8 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
return;
/* readahead nat pages to be scanned */
- ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+ META_NAT, true);
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
@@ -1558,6 +1560,9 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
remove_free_nid(nm_i, nid);
}
mutex_unlock(&curseg->curseg_mutex);
+
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
+ nm_i->ra_nid_pages, META_NAT, false);
}
/*
@@ -1803,10 +1808,10 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
nrpages = min(last_offset - i, bio_blocks);
/* readahead node pages */
- ra_meta_pages(sbi, addr, nrpages, META_POR);
+ ra_meta_pages(sbi, addr, nrpages, META_POR, true);
for (idx = addr; idx < addr + nrpages; idx++) {
- struct page *page = get_meta_page(sbi, idx);
+ struct page *page = get_tmp_page(sbi, idx);
rn = F2FS_NODE(page);
sum_entry->nid = rn->footer.nid;
@@ -2000,6 +2005,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
nm_i->fcnt = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
+ nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
INIT_LIST_HEAD(&nm_i->free_nid_list);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 7427e956ad81..e4fffd2d98c4 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -14,9 +14,11 @@
/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
-/* # of pages to perform readahead before building free nids */
+/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES 4
+#define DEF_RA_NID_PAGES 4 /* # of nid pages to read ahead */
+
/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index faec2ca004b9..cbf74f47cce8 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -180,7 +180,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
- ra_meta_pages(sbi, blkaddr, 1, META_POR);
+ ra_meta_pages(sbi, blkaddr, 1, META_POR, true);
while (1) {
struct fsync_inode_entry *entry;
@@ -188,7 +188,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
- page = get_meta_page(sbi, blkaddr);
+ page = get_tmp_page(sbi, blkaddr);
if (cp_ver != cpver_of_node(page))
break;
@@ -383,15 +383,11 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
start = start_bidx_of_node(ofs_of_node(page), fi);
end = start + ADDRS_PER_PAGE(page, fi);
- f2fs_lock_op(sbi);
-
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
- if (err) {
- f2fs_unlock_op(sbi);
+ if (err)
goto out;
- }
f2fs_wait_on_page_writeback(dn.node_page, NODE);
@@ -456,7 +452,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
set_page_dirty(dn.node_page);
err:
f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
out:
f2fs_msg(sbi->sb, KERN_NOTICE,
"recover_data: ino = %lx, recovered = %d blocks, err = %d",
@@ -485,7 +480,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
ra_meta_pages_cond(sbi, blkaddr);
- page = get_meta_page(sbi, blkaddr);
+ page = get_tmp_page(sbi, blkaddr);
if (cp_ver != cpver_of_node(page)) {
f2fs_put_page(page, 1);
@@ -570,7 +565,7 @@ out:
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
- MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+ (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 78e6d0696847..f77b3258454a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -14,8 +14,8 @@
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
-#include <linux/vmalloc.h>
#include <linux/swap.h>
+#include <linux/timer.h>
#include "f2fs.h"
#include "segment.h"
@@ -29,6 +29,21 @@ static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
+static unsigned long __reverse_ulong(unsigned char *str)
+{
+ unsigned long tmp = 0;
+ int shift = 24, idx = 0;
+
+#if BITS_PER_LONG == 64
+ shift = 56;
+#endif
+ while (shift >= 0) {
+ tmp |= (unsigned long)str[idx++] << shift;
+ shift -= BITS_PER_BYTE;
+ }
+ return tmp;
+}
+
/*
* __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
* MSB and LSB are reversed in a byte by f2fs_set_bit.
@@ -38,27 +53,31 @@ static inline unsigned long __reverse_ffs(unsigned long word)
int num = 0;
#if BITS_PER_LONG == 64
- if ((word & 0xffffffff) == 0) {
+ if ((word & 0xffffffff00000000UL) == 0)
num += 32;
+ else
word >>= 32;
- }
#endif
- if ((word & 0xffff) == 0) {
+ if ((word & 0xffff0000) == 0)
num += 16;
+ else
word >>= 16;
- }
- if ((word & 0xff) == 0) {
+
+ if ((word & 0xff00) == 0)
num += 8;
+ else
word >>= 8;
- }
+
if ((word & 0xf0) == 0)
num += 4;
else
word >>= 4;
+
if ((word & 0xc) == 0)
num += 2;
else
word >>= 2;
+
if ((word & 0x2) == 0)
num += 1;
return num;
@@ -68,26 +87,16 @@ static inline unsigned long __reverse_ffs(unsigned long word)
* __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
* f2fs_set_bit makes MSB and LSB reversed in a byte.
* Example:
- * LSB <--> MSB
- * f2fs_set_bit(0, bitmap) => 0000 0001
- * f2fs_set_bit(7, bitmap) => 1000 0000
+ * MSB <--> LSB
+ * f2fs_set_bit(0, bitmap) => 1000 0000
+ * f2fs_set_bit(7, bitmap) => 0000 0001
*/
static unsigned long __find_rev_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
- while (!f2fs_test_bit(offset, (unsigned char *)addr))
- offset++;
-
- if (offset > size)
- offset = size;
-
- return offset;
-#if 0
const unsigned long *p = addr + BIT_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
- unsigned long mask, submask;
- unsigned long quot, rest;
if (offset >= size)
return size;
@@ -97,14 +106,9 @@ static unsigned long __find_rev_next_bit(const unsigned long *addr,
if (!offset)
goto aligned;
- tmp = *(p++);
- quot = (offset >> 3) << 3;
- rest = offset & 0x7;
- mask = ~0UL << quot;
- submask = (unsigned char)(0xff << rest) >> rest;
- submask <<= quot;
- mask &= submask;
- tmp &= mask;
+ tmp = __reverse_ulong((unsigned char *)p);
+ tmp &= ~0UL >> offset;
+
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
@@ -112,42 +116,34 @@ static unsigned long __find_rev_next_bit(const unsigned long *addr,
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
+ p++;
aligned:
while (size & ~(BITS_PER_LONG-1)) {
- tmp = *(p++);
+ tmp = __reverse_ulong((unsigned char *)p);
if (tmp)
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
+ p++;
}
if (!size)
return result;
- tmp = *p;
+
+ tmp = __reverse_ulong((unsigned char *)p);
found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
+ tmp &= (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __reverse_ffs(tmp);
-#endif
}
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
- while (f2fs_test_bit(offset, (unsigned char *)addr))
- offset++;
-
- if (offset > size)
- offset = size;
-
- return offset;
-#if 0
const unsigned long *p = addr + BIT_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
- unsigned long mask, submask;
- unsigned long quot, rest;
if (offset >= size)
return size;
@@ -157,40 +153,36 @@ static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
if (!offset)
goto aligned;
- tmp = *(p++);
- quot = (offset >> 3) << 3;
- rest = offset & 0x7;
- mask = ~(~0UL << quot);
- submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
- submask <<= quot;
- mask += submask;
- tmp |= mask;
+ tmp = __reverse_ulong((unsigned char *)p);
+ tmp |= ~((~0UL << offset) >> offset);
+
if (size < BITS_PER_LONG)
goto found_first;
- if (~tmp)
+ if (tmp != ~0UL)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
+ p++;
aligned:
while (size & ~(BITS_PER_LONG - 1)) {
- tmp = *(p++);
- if (~tmp)
+ tmp = __reverse_ulong((unsigned char *)p);
+ if (tmp != ~0UL)
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
+ p++;
}
if (!size)
return result;
- tmp = *p;
+ tmp = __reverse_ulong((unsigned char *)p);
found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
+ tmp |= ~(~0UL << (BITS_PER_LONG - size));
+ if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found_middle:
return result + __reverse_ffz(tmp);
-#endif
}
void register_inmem_page(struct inode *inode, struct page *page)
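
__reverse_ulong() exists because f2fs bitmaps store bit 0 as the most significant bit of byte 0 (see the corrected comment above); assembling the bytes MSB-first yields a native word in which low f2fs bit numbers occupy high word bits, so the rewritten scans can use plain shift masks such as ~0UL >> offset instead of the old bit-by-bit loop. A standalone illustration of the transform, assuming a 64-bit long (names here are not from the patch):

#include <stdio.h>

static unsigned long reverse_ulong(const unsigned char *str)
{
	unsigned long tmp = 0;
	int shift, idx = 0;

	for (shift = 56; shift >= 0; shift -= 8)
		tmp |= (unsigned long)str[idx++] << shift;
	return tmp;
}

int main(void)
{
	/* f2fs_set_bit(0, bitmap) sets the MSB of byte 0, i.e. 0x80 */
	unsigned char bitmap[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };

	/* prints 8000000000000000: f2fs bit 0 maps to the word's MSB */
	printf("%016lx\n", reverse_ulong(bitmap));
	return 0;
}
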
@@ -257,11 +249,12 @@ int commit_inmem_pages(struct inode *inode, bool abort)
trace_f2fs_commit_inmem_page(cur->page, INMEM);
fio.page = cur->page;
err = do_write_data_page(&fio);
- submit_bio = true;
if (err) {
unlock_page(cur->page);
break;
}
+ clear_cold_data(cur->page);
+ submit_bio = true;
}
} else {
trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
@@ -296,7 +289,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
*/
if (has_not_enough_free_secs(sbi, 0)) {
mutex_lock(&sbi->gc_mutex);
- f2fs_gc(sbi);
+ f2fs_gc(sbi, false);
}
}
@@ -316,7 +309,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
excess_prefree_segs(sbi) ||
- !available_free_memory(sbi, INO_ENTRIES))
+ !available_free_memory(sbi, INO_ENTRIES) ||
+ jiffies > sbi->cp_expires)
f2fs_sync_fs(sbi->sb, true);
}
@@ -767,6 +761,30 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
mutex_unlock(&sit_i->sentry_lock);
}
+bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int segno, offset;
+ struct seg_entry *se;
+ bool is_cp = false;
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+ return true;
+
+ mutex_lock(&sit_i->sentry_lock);
+
+ segno = GET_SEGNO(sbi, blkaddr);
+ se = get_seg_entry(sbi, segno);
+ offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+
+ if (f2fs_test_bit(offset, se->ckpt_valid_map))
+ is_cp = true;
+
+ mutex_unlock(&sit_i->sentry_lock);
+
+ return is_cp;
+}
+
/*
* This function should be called with the curseg_mutex lock held
*/
@@ -1292,6 +1310,9 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
.encrypted_page = NULL,
};
+ if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+ fio.rw &= ~REQ_META;
+
set_page_writeback(page);
f2fs_submit_page_mbio(&fio);
}
@@ -1369,7 +1390,14 @@ static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
__add_sum_entry(sbi, type, sum);
- refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
+ if (!recover_curseg)
+ update_sit_entry(sbi, new_blkaddr, 1);
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ update_sit_entry(sbi, old_blkaddr, -1);
+
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
+
locate_dirty_segment(sbi, old_cursegno);
if (recover_curseg) {
@@ -1449,6 +1477,23 @@ void f2fs_wait_on_page_writeback(struct page *page,
}
}
+void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ struct page *cpage;
+
+ if (blkaddr == NEW_ADDR)
+ return;
+
+ f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
+
+ cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
+ if (cpage) {
+ f2fs_wait_on_page_writeback(cpage, DATA);
+ f2fs_put_page(cpage, 1);
+ }
+}
+
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1586,7 +1631,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
if (npages >= 2)
ra_meta_pages(sbi, start_sum_block(sbi), npages,
- META_CP);
+ META_CP, true);
/* restore for compacted data summary */
if (read_compacted_summaries(sbi))
@@ -1596,7 +1641,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
if (__exist_node_summaries(sbi))
ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
- NR_CURSEG_TYPE - type, META_CP);
+ NR_CURSEG_TYPE - type, META_CP, true);
for (; type <= CURSEG_COLD_NODE; type++) {
err = read_normal_summaries(sbi, type);
@@ -1955,12 +2000,13 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
SM_I(sbi)->sit_info = sit_i;
- sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry));
+ sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
+ sizeof(struct seg_entry), GFP_KERNEL);
if (!sit_i->sentries)
return -ENOMEM;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
@@ -1982,8 +2028,8 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
return -ENOMEM;
if (sbi->segs_per_sec > 1) {
- sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
- sizeof(struct sec_entry));
+ sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
+ sizeof(struct sec_entry), GFP_KERNEL);
if (!sit_i->sec_entries)
return -ENOMEM;
}
@@ -2028,12 +2074,12 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
SM_I(sbi)->free_info = free_i;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
+ free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
if (!free_i->free_segmap)
return -ENOMEM;
sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
+ free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
if (!free_i->free_secmap)
return -ENOMEM;
@@ -2082,7 +2128,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
int nrpages = MAX_BIO_BLOCKS(sbi);
do {
- readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
+ readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
start = start_blk * sit_i->sents_per_block;
end = (start_blk + readed) * sit_i->sents_per_block;
@@ -2174,7 +2220,7 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
@@ -2196,7 +2242,7 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
for (i = 0; i < NR_DIRTY_TYPE; i++) {
- dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
}
@@ -2301,7 +2347,7 @@ static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
mutex_lock(&dirty_i->seglist_lock);
- kfree(dirty_i->dirty_segmap[dirty_type]);
+ kvfree(dirty_i->dirty_segmap[dirty_type]);
dirty_i->nr_dirty[dirty_type] = 0;
mutex_unlock(&dirty_i->seglist_lock);
}
@@ -2309,7 +2355,7 @@ static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- kfree(dirty_i->victim_secmap);
+ kvfree(dirty_i->victim_secmap);
}
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
@@ -2348,8 +2394,8 @@ static void destroy_free_segmap(struct f2fs_sb_info *sbi)
if (!free_i)
return;
SM_I(sbi)->free_info = NULL;
- kfree(free_i->free_segmap);
- kfree(free_i->free_secmap);
+ kvfree(free_i->free_segmap);
+ kvfree(free_i->free_secmap);
kfree(free_i);
}
@@ -2370,9 +2416,9 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
}
kfree(sit_i->tmp_map);
- vfree(sit_i->sentries);
- vfree(sit_i->sec_entries);
- kfree(sit_i->dirty_sentries_bitmap);
+ kvfree(sit_i->sentries);
+ kvfree(sit_i->sec_entries);
+ kvfree(sit_i->dirty_sentries_bitmap);
SM_I(sbi)->sit_info = NULL;
kfree(sit_i->sit_bitmap);
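The f2fs hunks above move the large SIT entry arrays and the various segment/section bitmaps from plain kzalloc()/vzalloc() onto f2fs_kvzalloc()/f2fs_kvmalloc(), and release them with kvfree() so a single free path covers both allocation routes. The helpers themselves are not part of this diff; a minimal sketch of the usual "try kmalloc, fall back to vmalloc" pattern they are assumed to follow (the name and the fallback call here are illustrative, not the f2fs definitions):

#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch only: prefer a physically contiguous allocation,
 * suppressing the allocation-failure warning, and fall back to vmalloc
 * for large bitmaps.  Memory obtained either way can be freed with
 * kvfree(), which is why the teardown paths above switch from
 * kfree()/vfree() to kvfree().
 */
static inline void *example_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = vzalloc(size);
	return ret;
}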
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index b6e4ed15c698..ee44d346ea44 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -137,10 +137,12 @@ enum {
/*
* BG_GC means the background cleaning job.
* FG_GC means the on-demand cleaning job.
+ * FORCE_FG_GC means on-demand cleaning job in background.
*/
enum {
BG_GC = 0,
- FG_GC
+ FG_GC,
+ FORCE_FG_GC,
};
/* for a function parameter to select a victim segment */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index f79478115d37..3a65e0132352 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -213,8 +213,10 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, cp_interval);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -231,6 +233,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(max_victim_search),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
+ ATTR_LIST(ra_nid_pages),
+ ATTR_LIST(cp_interval),
NULL,
};
@@ -292,11 +296,16 @@ static int parse_options(struct super_block *sb, char *options)
if (!name)
return -ENOMEM;
- if (strlen(name) == 2 && !strncmp(name, "on", 2))
+ if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
set_opt(sbi, BG_GC);
- else if (strlen(name) == 3 && !strncmp(name, "off", 3))
+ clear_opt(sbi, FORCE_FG_GC);
+ } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
clear_opt(sbi, BG_GC);
- else {
+ clear_opt(sbi, FORCE_FG_GC);
+ } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
+ set_opt(sbi, BG_GC);
+ set_opt(sbi, FORCE_FG_GC);
+ } else {
kfree(name);
return -EINVAL;
}
@@ -631,10 +640,14 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
- if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC))
- seq_printf(seq, ",background_gc=%s", "on");
- else
+ if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
+ if (test_opt(sbi, FORCE_FG_GC))
+ seq_printf(seq, ",background_gc=%s", "sync");
+ else
+ seq_printf(seq, ",background_gc=%s", "on");
+ } else {
seq_printf(seq, ",background_gc=%s", "off");
+ }
if (test_opt(sbi, DISABLE_ROLL_FORWARD))
seq_puts(seq, ",disable_roll_forward");
if (test_opt(sbi, DISCARD))
@@ -742,6 +755,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
int err, active_logs;
bool need_restart_gc = false;
bool need_stop_gc = false;
+ bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
sync_filesystem(sb);
@@ -767,6 +781,14 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
goto skip;
+ /* disallow enable/disable extent_cache dynamically */
+ if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
+ err = -EINVAL;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "switch extent_cache option is not allowed");
+ goto restore_opts;
+ }
+
/*
* We stop the GC thread if FS is mounted as RO
* or if background_gc = off is passed in mount
@@ -996,6 +1018,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
atomic_set(&sbi->nr_pages[i], 0);
sbi->dir_level = DEF_DIR_LEVEL;
+ sbi->cp_interval = DEF_CP_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
INIT_LIST_HEAD(&sbi->s_list);
@@ -1332,6 +1355,8 @@ try_onemore:
f2fs_commit_super(sbi, true);
}
+ sbi->cp_expires = round_jiffies_up(jiffies);
+
return 0;
free_kobj:
diff --git a/fs/file.c b/fs/file.c
index c6986dce0334..39f8f15921da 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -60,8 +60,31 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
/*
- * Expand the fdset in the files_struct. Called with the files spinlock
- * held for write.
+ * Copy 'count' fd bits from the old table to the new table and clear the extra
+ * space if any. This does not copy the file pointers. Called with the files
+ * spinlock held for write.
+ */
+static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+ unsigned int count)
+{
+ unsigned int cpy, set;
+
+ cpy = count / BITS_PER_BYTE;
+ set = (nfdt->max_fds - count) / BITS_PER_BYTE;
+ memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
+ memset((char *)nfdt->open_fds + cpy, 0, set);
+ memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
+ memset((char *)nfdt->close_on_exec + cpy, 0, set);
+
+ cpy = BITBIT_SIZE(count);
+ set = BITBIT_SIZE(nfdt->max_fds) - cpy;
+ memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
+ memset((char *)nfdt->full_fds_bits + cpy, 0, set);
+}
+
+/*
+ * Copy all file descriptors from the old table to the new, expanded table and
+ * clear the extra space. Called with the files spinlock held for write.
*/
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
@@ -72,19 +95,9 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
cpy = ofdt->max_fds * sizeof(struct file *);
set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
memcpy(nfdt->fd, ofdt->fd, cpy);
- memset((char *)(nfdt->fd) + cpy, 0, set);
+ memset((char *)nfdt->fd + cpy, 0, set);
- cpy = ofdt->max_fds / BITS_PER_BYTE;
- set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
- memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
- memset((char *)(nfdt->open_fds) + cpy, 0, set);
- memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
- memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
-
- cpy = BITBIT_SIZE(ofdt->max_fds);
- set = BITBIT_SIZE(nfdt->max_fds) - cpy;
- memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
- memset(cpy+(char *)nfdt->full_fds_bits, 0, set);
+ copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}
static struct fdtable * alloc_fdtable(unsigned int nr)
@@ -277,7 +290,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
struct files_struct *newf;
struct file **old_fds, **new_fds;
- int open_files, size, i;
+ int open_files, i;
struct fdtable *old_fdt, *new_fdt;
*errorp = -ENOMEM;
@@ -334,13 +347,11 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
open_files = count_open_files(old_fdt);
}
+ copy_fd_bitmaps(new_fdt, old_fdt, open_files);
+
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
- memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
- memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
- memcpy(new_fdt->full_fds_bits, old_fdt->full_fds_bits, BITBIT_SIZE(open_files));
-
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
if (f) {
@@ -358,19 +369,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
}
spin_unlock(&oldf->file_lock);
- /* compute the remainder to be cleared */
- size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
-
- /* This is long word aligned thus could use a optimized version */
- memset(new_fds, 0, size);
-
- if (new_fdt->max_fds > open_files) {
- int left = (new_fdt->max_fds - open_files) / 8;
- int start = open_files / BITS_PER_LONG;
-
- memset(&new_fdt->open_fds[start], 0, left);
- memset(&new_fdt->close_on_exec[start], 0, left);
- }
+ /* clear the remainder */
+ memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
rcu_assign_pointer(newf->fdt, new_fdt);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 51dde817e1f2..6b028b7c4250 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -316,7 +316,7 @@ static const struct seq_operations fscache_objlist_ops = {
static void fscache_objlist_config(struct fscache_objlist_data *data)
{
#ifdef CONFIG_KEYS
- struct user_key_payload *confkey;
+ const struct user_key_payload *confkey;
unsigned long config;
struct key *key;
const char *buf;
@@ -329,7 +329,7 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
config = 0;
rcu_read_lock();
- confkey = key->payload.data;
+ confkey = user_key_payload(key);
buf = confkey->data;
for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f523f2f04c19..e0faf8f2c868 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2189,7 +2189,7 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
int err;
if (fc->no_flock) {
- err = flock_lock_file_wait(file, fl);
+ err = locks_lock_file_wait(file, fl);
} else {
struct fuse_file *ff = file->private_data;
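fuse is the first of several callers below (gfs2, lockd, nfs, ocfs2, and sys_flock itself) converted from flock_lock_file_wait()/posix_lock_file_wait() to the new locks_lock_file_wait(). Only the inode-based locks_lock_inode_wait() appears in the fs/locks.c changes further below; the file-based entry point is presumably a thin wrapper along these lines (a sketch, not the actual include/linux/fs.h definition):

static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	/* the FL_POSIX/FL_FLOCK dispatch happens in locks_lock_inode_wait() */
	return locks_lock_inode_wait(file_inode(filp), fl);
}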
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index cf4ab89159f4..9287a2d17b8c 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -1000,7 +1000,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
}
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
if (fl->fl_type == F_UNLCK)
- posix_lock_file_wait(file, fl);
+ locks_lock_file_wait(file, fl);
return -EIO;
}
if (IS_GETLK(cmd))
@@ -1031,7 +1031,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
if (gl) {
if (fl_gh->gh_state == state)
goto out;
- flock_lock_file_wait(file,
+ locks_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
gfs2_glock_dq(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
@@ -1056,7 +1056,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
if (error == GLR_TRYFAILED)
error = -EAGAIN;
} else {
- error = flock_lock_file_wait(file, fl);
+ error = locks_lock_file_wait(file, fl);
gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
}
@@ -1071,7 +1071,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
struct gfs2_holder *fl_gh = &fp->f_fl_gh;
mutex_lock(&fp->f_fl_mutex);
- flock_lock_file_wait(file, fl);
+ locks_lock_file_wait(file, fl);
if (fl_gh->gh_gl) {
gfs2_glock_dq(fl_gh);
gfs2_holder_uninit(fl_gh);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index acd394716349..112952037933 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -474,18 +474,7 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho
static int do_vfs_lock(struct file_lock *fl)
{
- int res = 0;
- switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
- case FL_POSIX:
- res = posix_lock_file_wait(fl->fl_file, fl);
- break;
- case FL_FLOCK:
- res = flock_lock_file_wait(fl->fl_file, fl);
- break;
- default:
- BUG();
- }
- return res;
+ return locks_lock_file_wait(fl->fl_file, fl);
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index 2a54c800a223..0d2b3267e2a3 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -205,28 +205,32 @@ static struct kmem_cache *filelock_cache __read_mostly;
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
- struct file_lock_context *new;
+ struct file_lock_context *ctx;
- if (likely(inode->i_flctx) || type == F_UNLCK)
+ /* paired with cmpxchg() below */
+ ctx = smp_load_acquire(&inode->i_flctx);
+ if (likely(ctx) || type == F_UNLCK)
goto out;
- new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
- if (!new)
+ ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+ if (!ctx)
goto out;
- spin_lock_init(&new->flc_lock);
- INIT_LIST_HEAD(&new->flc_flock);
- INIT_LIST_HEAD(&new->flc_posix);
- INIT_LIST_HEAD(&new->flc_lease);
+ spin_lock_init(&ctx->flc_lock);
+ INIT_LIST_HEAD(&ctx->flc_flock);
+ INIT_LIST_HEAD(&ctx->flc_posix);
+ INIT_LIST_HEAD(&ctx->flc_lease);
/*
* Assign the pointer if it's not already assigned. If it is, then
* free the context we just allocated.
*/
- if (cmpxchg(&inode->i_flctx, NULL, new))
- kmem_cache_free(flctx_cache, new);
+ if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
+ kmem_cache_free(flctx_cache, ctx);
+ ctx = smp_load_acquire(&inode->i_flctx);
+ }
out:
- return inode->i_flctx;
+ return ctx;
}
void
@@ -762,7 +766,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
struct file_lock_context *ctx;
struct inode *inode = file_inode(filp);
- ctx = inode->i_flctx;
+ ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx || list_empty_careful(&ctx->flc_posix)) {
fl->fl_type = F_UNLCK;
return;
@@ -1167,10 +1171,9 @@ EXPORT_SYMBOL(posix_lock_file);
* @inode: inode of file to which lock request should be applied
* @fl: The lock to be applied
*
- * Variant of posix_lock_file_wait that does not take a filp, and so can be
- * used after the filp has already been torn down.
+ * Apply a POSIX style lock request to an inode.
*/
-int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
int error;
might_sleep ();
@@ -1187,7 +1190,6 @@ int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
}
return error;
}
-EXPORT_SYMBOL(posix_lock_inode_wait);
/**
* locks_mandatory_locked - Check for an active lock
@@ -1203,7 +1205,7 @@ int locks_mandatory_locked(struct file *file)
struct file_lock_context *ctx;
struct file_lock *fl;
- ctx = inode->i_flctx;
+ ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx || list_empty_careful(&ctx->flc_posix))
return 0;
@@ -1388,7 +1390,7 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
int error = 0;
- struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock_context *ctx;
struct file_lock *new_fl, *fl, *tmp;
unsigned long break_time;
int want_write = (mode & O_ACCMODE) != O_RDONLY;
@@ -1400,6 +1402,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
new_fl->fl_flags = type;
/* typically we will check that ctx is non-NULL before calling */
+ ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx) {
WARN_ON_ONCE(1);
return error;
@@ -1494,9 +1497,10 @@ EXPORT_SYMBOL(__break_lease);
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
bool has_lease = false;
- struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock_context *ctx;
struct file_lock *fl;
+ ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
spin_lock(&ctx->flc_lock);
if (!list_empty(&ctx->flc_lease)) {
@@ -1543,10 +1547,11 @@ int fcntl_getlease(struct file *filp)
{
struct file_lock *fl;
struct inode *inode = file_inode(filp);
- struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock_context *ctx;
int type = F_UNLCK;
LIST_HEAD(dispose);
+ ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
spin_lock(&ctx->flc_lock);
time_out_leases(file_inode(filp), &dispose);
@@ -1711,11 +1716,11 @@ static int generic_delete_lease(struct file *filp, void *owner)
{
int error = -EAGAIN;
struct file_lock *fl, *victim = NULL;
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
- struct file_lock_context *ctx = inode->i_flctx;
+ struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx;
LIST_HEAD(dispose);
+ ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx) {
trace_generic_delete_lease(inode, NULL);
return error;
@@ -1751,8 +1756,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
void **priv)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int error;
if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
@@ -1856,7 +1860,7 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
*
* Apply a FLOCK style lock request to an inode.
*/
-int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
int error;
might_sleep();
@@ -1873,7 +1877,30 @@ int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
}
return error;
}
-EXPORT_SYMBOL(flock_lock_inode_wait);
+
+/**
+ * locks_lock_inode_wait - Apply a lock to an inode
+ * @inode: inode of the file to apply to
+ * @fl: The lock to be applied
+ *
+ * Apply a POSIX or FLOCK style lock request to an inode.
+ */
+int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+{
+ int res = 0;
+ switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ case FL_POSIX:
+ res = posix_lock_inode_wait(inode, fl);
+ break;
+ case FL_FLOCK:
+ res = flock_lock_inode_wait(inode, fl);
+ break;
+ default:
+ BUG();
+ }
+ return res;
+}
+EXPORT_SYMBOL(locks_lock_inode_wait);
/**
* sys_flock: - flock() system call.
@@ -1931,7 +1958,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
(can_sleep) ? F_SETLKW : F_SETLK,
lock);
else
- error = flock_lock_file_wait(f.file, lock);
+ error = locks_lock_file_wait(f.file, lock);
out_free:
locks_free_lock(lock);
@@ -2107,7 +2134,7 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
return error;
}
-/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */
+/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
@@ -2359,13 +2386,14 @@ out:
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
struct file_lock lock;
- struct file_lock_context *ctx = file_inode(filp)->i_flctx;
+ struct file_lock_context *ctx;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
+ ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
if (!ctx || list_empty(&ctx->flc_posix))
return;
@@ -2389,7 +2417,7 @@ EXPORT_SYMBOL(locks_remove_posix);
/* The i_flctx must be valid when calling into here */
static void
-locks_remove_flock(struct file *filp)
+locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
struct file_lock fl = {
.fl_owner = filp,
@@ -2400,7 +2428,6 @@ locks_remove_flock(struct file *filp)
.fl_end = OFFSET_MAX,
};
struct inode *inode = file_inode(filp);
- struct file_lock_context *flctx = inode->i_flctx;
if (list_empty(&flctx->flc_flock))
return;
@@ -2416,10 +2443,8 @@ locks_remove_flock(struct file *filp)
/* The i_flctx must be valid when calling into here */
static void
-locks_remove_lease(struct file *filp)
+locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
- struct inode *inode = file_inode(filp);
- struct file_lock_context *ctx = inode->i_flctx;
struct file_lock *fl, *tmp;
LIST_HEAD(dispose);
@@ -2439,17 +2464,20 @@ locks_remove_lease(struct file *filp)
*/
void locks_remove_file(struct file *filp)
{
- if (!file_inode(filp)->i_flctx)
+ struct file_lock_context *ctx;
+
+ ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
+ if (!ctx)
return;
/* remove any OFD locks */
locks_remove_posix(filp, filp);
/* remove flock locks */
- locks_remove_flock(filp);
+ locks_remove_flock(filp, ctx);
/* remove any leases */
- locks_remove_lease(filp);
+ locks_remove_lease(filp, ctx);
}
/**
@@ -2616,7 +2644,7 @@ void show_fd_locks(struct seq_file *f,
struct file_lock_context *ctx;
int id = 0;
- ctx = inode->i_flctx;
+ ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx)
return;
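The recurring fs/locks.c change is that every reader of inode->i_flctx now goes through smp_load_acquire(), paired with the cmpxchg() that publishes the context in locks_get_lock_context(). A minimal sketch of that lockless lazy-initialisation pattern, using illustrative names rather than the kernel's structures:

#include <linux/slab.h>
#include <linux/atomic.h>

struct example_ctx {
	int initialised;
};

static struct example_ctx *example_get_ctx(struct example_ctx **slot)
{
	struct example_ctx *ctx;

	/* acquire pairs with the releasing cmpxchg() below */
	ctx = smp_load_acquire(slot);
	if (ctx)
		return ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->initialised = 1;		/* fully set up before publication */

	/* publish exactly once; a losing racer frees its copy and reloads */
	if (cmpxchg(slot, NULL, ctx)) {
		kfree(ctx);
		ctx = smp_load_acquire(slot);
	}
	return ctx;
}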
diff --git a/fs/namei.c b/fs/namei.c
index 33e9495a3129..0d3340b32e14 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -955,26 +955,23 @@ static bool safe_hardlink_source(struct inode *inode)
* - sysctl_protected_hardlinks enabled
* - fsuid does not match inode
* - hardlink source is unsafe (see safe_hardlink_source() above)
- * - not CAP_FOWNER
+ * - not CAP_FOWNER in a namespace with the inode owner uid mapped
*
* Returns 0 if successful, -ve on error.
*/
static int may_linkat(struct path *link)
{
- const struct cred *cred;
struct inode *inode;
if (!sysctl_protected_hardlinks)
return 0;
- cred = current_cred();
inode = link->dentry->d_inode;
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
- if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
- capable(CAP_FOWNER))
+ if (inode_owner_or_capable(inode) || safe_hardlink_source(inode))
return 0;
audit_log_link_denied("linkat", link);
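may_linkat() now relies on inode_owner_or_capable() instead of open-coding the fsuid comparison plus a bare capable(CAP_FOWNER). The practical difference, reflected in the updated comment, is that the capability is evaluated against the current user namespace and only honoured when the inode owner's uid is mapped there. The helper's body is not part of this diff, but the check amounts to roughly the following (paraphrased sketch, not the exact fs/inode.c code):

#include <linux/fs.h>
#include <linux/cred.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

static bool example_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	/* CAP_FOWNER only counts if the owner uid is visible in this ns */
	return ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid);
}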
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index c0f9b1ed12b9..37f639d50af5 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -738,18 +738,7 @@ out_noconflict:
static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
- int res = 0;
- switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
- case FL_POSIX:
- res = posix_lock_file_wait(file, fl);
- break;
- case FL_FLOCK:
- res = flock_lock_file_wait(file, fl);
- break;
- default:
- BUG();
- }
- return res;
+ return locks_lock_file_wait(file, fl);
}
static int
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 2e4902203c35..5ba22c6b0ffa 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -297,7 +297,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
{
const struct cred *saved_cred;
struct key *rkey;
- struct user_key_payload *payload;
+ const struct user_key_payload *payload;
ssize_t ret;
saved_cred = override_creds(id_resolver_cache);
@@ -316,7 +316,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
if (ret < 0)
goto out_up;
- payload = rcu_dereference(rkey->payload.rcudata);
+ payload = user_key_payload(rkey);
if (IS_ERR_OR_NULL(payload)) {
ret = PTR_ERR(payload);
goto out_up;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5133bb18830e..0e5ff69455c7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5513,18 +5513,7 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
{
- int res = 0;
- switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
- case FL_POSIX:
- res = posix_lock_inode_wait(inode, fl);
- break;
- case FL_FLOCK:
- res = flock_lock_inode_wait(inode, fl);
- break;
- default:
- BUG();
- }
- return res;
+ return locks_lock_inode_wait(inode, fl);
}
struct nfs4_unlockdata {
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index 6b6d092b0998..652ece4a9d9e 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -66,7 +66,7 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
* level.
*/
- flock_lock_file_wait(file,
+ locks_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
ocfs2_file_unlock(file);
@@ -81,7 +81,7 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
goto out;
}
- ret = flock_lock_file_wait(file, fl);
+ ret = locks_lock_file_wait(file, fl);
if (ret)
ocfs2_file_unlock(file);
@@ -98,7 +98,7 @@ static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl)
mutex_lock(&fp->fp_mutex);
ocfs2_file_unlock(file);
- ret = flock_lock_file_wait(file, fl);
+ ret = locks_lock_file_wait(file, fl);
mutex_unlock(&fp->fp_mutex);
return ret;
@@ -119,7 +119,7 @@ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl)
if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
ocfs2_mount_local(osb))
- return flock_lock_file_wait(file, fl);
+ return locks_lock_file_wait(file, fl);
if (fl->fl_type == F_UNLCK)
return ocfs2_do_funlock(file, cmd, fl);
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 916b8e23d968..360ae43f590c 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,5 +1,5 @@
config PSTORE
- bool "Persistent store support"
+ tristate "Persistent store support"
default n
select ZLIB_DEFLATE
select ZLIB_INFLATE
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile
index e647d8e81712..b8803cc07fce 100644
--- a/fs/pstore/Makefile
+++ b/fs/pstore/Makefile
@@ -2,12 +2,12 @@
# Makefile for the linux pstorefs routines.
#
-obj-y += pstore.o
+obj-$(CONFIG_PSTORE) += pstore.o
pstore-objs += inode.o platform.o
-obj-$(CONFIG_PSTORE_FTRACE) += ftrace.o
+pstore-$(CONFIG_PSTORE_FTRACE) += ftrace.o
-obj-$(CONFIG_PSTORE_PMSG) += pmsg.o
+pstore-$(CONFIG_PSTORE_PMSG) += pmsg.o
ramoops-objs += ram.o ram_core.o
obj-$(CONFIG_PSTORE_RAM) += ramoops.o
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index 76a4eeb92982..d4887705bb61 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -104,22 +104,23 @@ static const struct file_operations pstore_knob_fops = {
.write = pstore_ftrace_knob_write,
};
+static struct dentry *pstore_ftrace_dir;
+
void pstore_register_ftrace(void)
{
- struct dentry *dir;
struct dentry *file;
if (!psinfo->write_buf)
return;
- dir = debugfs_create_dir("pstore", NULL);
- if (!dir) {
+ pstore_ftrace_dir = debugfs_create_dir("pstore", NULL);
+ if (!pstore_ftrace_dir) {
pr_err("%s: unable to create pstore directory\n", __func__);
return;
}
- file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
- &pstore_knob_fops);
+ file = debugfs_create_file("record_ftrace", 0600, pstore_ftrace_dir,
+ NULL, &pstore_knob_fops);
if (!file) {
pr_err("%s: unable to create record_ftrace file\n", __func__);
goto err_file;
@@ -127,5 +128,17 @@ void pstore_register_ftrace(void)
return;
err_file:
- debugfs_remove(dir);
+ debugfs_remove(pstore_ftrace_dir);
+}
+
+void pstore_unregister_ftrace(void)
+{
+ mutex_lock(&pstore_ftrace_lock);
+ if (pstore_ftrace_enabled) {
+ unregister_ftrace_function(&pstore_ftrace_ops);
+ pstore_ftrace_enabled = 0;
+ }
+ mutex_unlock(&pstore_ftrace_lock);
+
+ debugfs_remove_recursive(pstore_ftrace_dir);
}
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 3adcc4669fac..d8c439d813ce 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -178,6 +178,7 @@ static loff_t pstore_file_llseek(struct file *file, loff_t off, int whence)
}
static const struct file_operations pstore_file_operations = {
+ .owner = THIS_MODULE,
.open = pstore_file_open,
.read = pstore_file_read,
.llseek = pstore_file_llseek,
@@ -287,7 +288,7 @@ static const struct super_operations pstore_ops = {
static struct super_block *pstore_sb;
-int pstore_is_mounted(void)
+bool pstore_is_mounted(void)
{
return pstore_sb != NULL;
}
@@ -456,6 +457,7 @@ static void pstore_kill_sb(struct super_block *sb)
}
static struct file_system_type pstore_fs_type = {
+ .owner = THIS_MODULE,
.name = "pstore",
.mount = pstore_mount,
.kill_sb = pstore_kill_sb,
@@ -479,5 +481,12 @@ out:
}
module_init(init_pstore_fs)
+static void __exit exit_pstore_fs(void)
+{
+ unregister_filesystem(&pstore_fs_type);
+ sysfs_remove_mount_point(fs_kobj, "pstore");
+}
+module_exit(exit_pstore_fs)
+
MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index c36ba2cd0b5d..e38a22b31282 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -41,14 +41,18 @@ pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_register_ftrace(void);
+extern void pstore_unregister_ftrace(void);
#else
static inline void pstore_register_ftrace(void) {}
+static inline void pstore_unregister_ftrace(void) {}
#endif
#ifdef CONFIG_PSTORE_PMSG
extern void pstore_register_pmsg(void);
+extern void pstore_unregister_pmsg(void);
#else
static inline void pstore_register_pmsg(void) {}
+static inline void pstore_unregister_pmsg(void) {}
#endif
extern struct pstore_info *psinfo;
@@ -59,6 +63,6 @@ extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
int count, char *data, bool compressed,
size_t size, struct timespec time,
struct pstore_info *psi);
-extern int pstore_is_mounted(void);
+extern bool pstore_is_mounted(void);
#endif
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 791743deedf1..588461bb2dd4 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -237,6 +237,14 @@ static void allocate_buf_for_compression(void)
}
+static void free_buf_for_compression(void)
+{
+ kfree(stream.workspace);
+ stream.workspace = NULL;
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+}
+
/*
* Called when compression fails, since the printk buffer
* would be fetched for compression calling it again when
@@ -353,6 +361,19 @@ static struct kmsg_dumper pstore_dumper = {
.dump = pstore_dump,
};
+/*
+ * Register with kmsg_dump to save last part of console log on panic.
+ */
+static void pstore_register_kmsg(void)
+{
+ kmsg_dump_register(&pstore_dumper);
+}
+
+static void pstore_unregister_kmsg(void)
+{
+ kmsg_dump_unregister(&pstore_dumper);
+}
+
#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
@@ -390,8 +411,14 @@ static void pstore_register_console(void)
{
register_console(&pstore_console);
}
+
+static void pstore_unregister_console(void)
+{
+ unregister_console(&pstore_console);
+}
#else
static void pstore_register_console(void) {}
+static void pstore_unregister_console(void) {}
#endif
static int pstore_write_compat(enum pstore_type_id type,
@@ -410,8 +437,6 @@ static int pstore_write_compat(enum pstore_type_id type,
* read function right away to populate the file system. If not
* then the pstore mount code will call us later to fill out
* the file system.
- *
- * Register with kmsg_dump to save last part of console log on panic.
*/
int pstore_register(struct pstore_info *psi)
{
@@ -442,7 +467,7 @@ int pstore_register(struct pstore_info *psi)
if (pstore_is_mounted())
pstore_get_records(0);
- kmsg_dump_register(&pstore_dumper);
+ pstore_register_kmsg();
if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
pstore_register_console();
@@ -462,12 +487,28 @@ int pstore_register(struct pstore_info *psi)
*/
backend = psi->name;
+ module_put(owner);
+
pr_info("Registered %s as persistent store backend\n", psi->name);
return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);
+void pstore_unregister(struct pstore_info *psi)
+{
+ pstore_unregister_pmsg();
+ pstore_unregister_ftrace();
+ pstore_unregister_console();
+ pstore_unregister_kmsg();
+
+ free_buf_for_compression();
+
+ psinfo = NULL;
+ backend = NULL;
+}
+EXPORT_SYMBOL_GPL(pstore_unregister);
+
/*
* Read all the records from the persistent store. Create
* files in our filesystem. Don't warn about -EEXIST errors
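With pstore_unregister() exported and the kmsg, console, ftrace and pmsg facilities gaining matching unregister helpers, a backend can now be built as a module and torn down cleanly; the ramoops conversion further below is the in-tree user. A hedged skeleton of the register/unregister pairing a hypothetical backend module would follow (buffer setup and the read/write/erase callbacks are omitted, so this shows the shape rather than a working backend):

#include <linux/module.h>
#include <linux/pstore.h>

static struct pstore_info example_psinfo = {
	.owner	= THIS_MODULE,
	.name	= "example",
	/* .buf/.bufsize and the .open/.read/.write/.erase ops go here */
};

static int __init example_pstore_init(void)
{
	return pstore_register(&example_psinfo);
}

static void __exit example_pstore_exit(void)
{
	/* undoes kmsg/console/ftrace/pmsg registration, frees buffers */
	pstore_unregister(&example_psinfo);
}

module_init(example_pstore_init);
module_exit(example_pstore_exit);
MODULE_LICENSE("GPL");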
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
index feb5dd2948b4..7de20cd3797f 100644
--- a/fs/pstore/pmsg.c
+++ b/fs/pstore/pmsg.c
@@ -37,6 +37,8 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
buffer = vmalloc(buffer_size);
+ if (!buffer)
+ return -ENOMEM;
mutex_lock(&pmsg_lock);
for (i = 0; i < count; ) {
@@ -112,3 +114,10 @@ err_class:
err:
return;
}
+
+void pstore_unregister_pmsg(void)
+{
+ device_destroy(pmsg_class, MKDEV(pmsg_major, 0));
+ class_destroy(pmsg_class);
+ unregister_chrdev(pmsg_major, PMSG_NAME);
+}
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 6c26c4daaec9..319c3a60cfa5 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -578,30 +578,27 @@ fail_out:
return err;
}
-static int __exit ramoops_remove(struct platform_device *pdev)
+static int ramoops_remove(struct platform_device *pdev)
{
-#if 0
- /* TODO(kees): We cannot unload ramoops since pstore doesn't support
- * unregistering yet.
- */
struct ramoops_context *cxt = &oops_cxt;
- iounmap(cxt->virt_addr);
- release_mem_region(cxt->phys_addr, cxt->size);
+ pstore_unregister(&cxt->pstore);
cxt->max_dump_cnt = 0;
- /* TODO(kees): When pstore supports unregistering, call it here. */
kfree(cxt->pstore.buf);
cxt->pstore.bufsize = 0;
+ persistent_ram_free(cxt->mprz);
+ persistent_ram_free(cxt->fprz);
+ persistent_ram_free(cxt->cprz);
+ ramoops_free_przs(cxt);
+
return 0;
-#endif
- return -EBUSY;
}
static struct platform_driver ramoops_driver = {
.probe = ramoops_probe,
- .remove = __exit_p(ramoops_remove),
+ .remove = ramoops_remove,
.driver = {
.name = "ramoops",
},
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 39a019936768..e1236594fffe 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -352,3 +352,47 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
}
}
EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group);
+
+/**
+ * __compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing
+ * to a group or an attribute
+ * @kobj: The kobject containing the group.
+ * @target_kobj: The target kobject.
+ * @target_name: The name of the target group or attribute.
+ */
+int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name)
+{
+ struct kernfs_node *target;
+ struct kernfs_node *entry;
+ struct kernfs_node *link;
+
+ /*
+ * We don't own @target_kobj and it may be removed at any time.
+ * Synchronize using sysfs_symlink_target_lock. See sysfs_remove_dir()
+ * for details.
+ */
+ spin_lock(&sysfs_symlink_target_lock);
+ target = target_kobj->sd;
+ if (target)
+ kernfs_get(target);
+ spin_unlock(&sysfs_symlink_target_lock);
+ if (!target)
+ return -ENOENT;
+
+ entry = kernfs_find_and_get(target_kobj->sd, target_name);
+ if (!entry) {
+ kernfs_put(target);
+ return -ENOENT;
+ }
+
+ link = kernfs_create_link(kobj->sd, target_name, entry);
+ if (IS_ERR(link) && PTR_ERR(link) == -EEXIST)
+ sysfs_warn_dup(kobj->sd, target_name);
+
+ kernfs_put(entry);
+ kernfs_put(target);
+ return IS_ERR(link) ? PTR_ERR(link) : 0;
+}
+EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj);
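__compat_only_sysfs_link_entry_to_kobj() lets a driver keep a legacy sysfs location alive by symlinking a group or attribute that really lives under another kobject. A hedged usage sketch with hypothetical kobjects and a hypothetical "stats" group; the link is created under compat_kobj using the target's own name:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_add_compat_link(struct kobject *compat_kobj,
				   struct kobject *real_kobj)
{
	/* creates compat_kobj/stats -> real_kobj/stats, or -ENOENT/-EEXIST */
	return __compat_only_sysfs_link_entry_to_kobj(compat_kobj, real_kobj,
						      "stats");
}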
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 067c242b1e15..cc2516df0efa 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,7 +15,6 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/mpi.h>
-#include <keys/asymmetric-type.h>
#include <crypto/hash_info.h>
enum pkey_algo {
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 0b4cb999a3f7..ab3ee241d10c 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -18,5 +18,6 @@
#define AT91_PMC_MOSCSELS 16 /* Main Oscillator Selection */
#define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */
#define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */
+#define AT91_PMC_GCKRDY 24 /* Generated Clocks */
#endif
diff --git a/include/dt-bindings/clock/bcm-ns2.h b/include/dt-bindings/clock/bcm-ns2.h
new file mode 100644
index 000000000000..d99c7a2e70cb
--- /dev/null
+++ b/include/dt-bindings/clock/bcm-ns2.h
@@ -0,0 +1,72 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Broadcom Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLOCK_BCM_NS2_H
+#define _CLOCK_BCM_NS2_H
+
+/* GENPLL SCR clock channel ID */
+#define BCM_NS2_GENPLL_SCR 0
+#define BCM_NS2_GENPLL_SCR_SCR_CLK 1
+#define BCM_NS2_GENPLL_SCR_FS_CLK 2
+#define BCM_NS2_GENPLL_SCR_AUDIO_CLK 3
+#define BCM_NS2_GENPLL_SCR_CH3_UNUSED 4
+#define BCM_NS2_GENPLL_SCR_CH4_UNUSED 5
+#define BCM_NS2_GENPLL_SCR_CH5_UNUSED 6
+
+/* GENPLL SW clock channel ID */
+#define BCM_NS2_GENPLL_SW 0
+#define BCM_NS2_GENPLL_SW_RPE_CLK 1
+#define BCM_NS2_GENPLL_SW_250_CLK 2
+#define BCM_NS2_GENPLL_SW_NIC_CLK 3
+#define BCM_NS2_GENPLL_SW_CHIMP_CLK 4
+#define BCM_NS2_GENPLL_SW_PORT_CLK 5
+#define BCM_NS2_GENPLL_SW_SDIO_CLK 6
+
+/* LCPLL DDR clock channel ID */
+#define BCM_NS2_LCPLL_DDR 0
+#define BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK 1
+#define BCM_NS2_LCPLL_DDR_DDR_CLK 2
+#define BCM_NS2_LCPLL_DDR_CH2_UNUSED 3
+#define BCM_NS2_LCPLL_DDR_CH3_UNUSED 4
+#define BCM_NS2_LCPLL_DDR_CH4_UNUSED 5
+#define BCM_NS2_LCPLL_DDR_CH5_UNUSED 6
+
+/* LCPLL PORTS clock channel ID */
+#define BCM_NS2_LCPLL_PORTS 0
+#define BCM_NS2_LCPLL_PORTS_WAN_CLK 1
+#define BCM_NS2_LCPLL_PORTS_RGMII_CLK 2
+#define BCM_NS2_LCPLL_PORTS_CH2_UNUSED 3
+#define BCM_NS2_LCPLL_PORTS_CH3_UNUSED 4
+#define BCM_NS2_LCPLL_PORTS_CH4_UNUSED 5
+#define BCM_NS2_LCPLL_PORTS_CH5_UNUSED 6
+
+#endif /* _CLOCK_BCM_NS2_H */
diff --git a/include/dt-bindings/clock/bcm-nsp.h b/include/dt-bindings/clock/bcm-nsp.h
new file mode 100644
index 000000000000..ad5827cde782
--- /dev/null
+++ b/include/dt-bindings/clock/bcm-nsp.h
@@ -0,0 +1,51 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Broadcom Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLOCK_BCM_NSP_H
+#define _CLOCK_BCM_NSP_H
+
+/* GENPLL clock channel ID */
+#define BCM_NSP_GENPLL 0
+#define BCM_NSP_GENPLL_PHY_CLK 1
+#define BCM_NSP_GENPLL_ENET_SW_CLK 2
+#define BCM_NSP_GENPLL_USB_PHY_REF_CLK 3
+#define BCM_NSP_GENPLL_IPROCFAST_CLK 4
+#define BCM_NSP_GENPLL_SATA1_CLK 5
+#define BCM_NSP_GENPLL_SATA2_CLK 6
+
+/* LCPLL0 clock channel ID */
+#define BCM_NSP_LCPLL0 0
+#define BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK 1
+#define BCM_NSP_LCPLL0_SDIO_CLK 2
+#define BCM_NSP_LCPLL0_DDR_PHY_CLK 3
+
+#endif /* _CLOCK_BCM_NSP_H */
diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h
new file mode 100644
index 000000000000..d323efac7edf
--- /dev/null
+++ b/include/dt-bindings/clock/bcm2835.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define BCM2835_PLLA 0
+#define BCM2835_PLLB 1
+#define BCM2835_PLLC 2
+#define BCM2835_PLLD 3
+#define BCM2835_PLLH 4
+
+#define BCM2835_PLLA_CORE 5
+#define BCM2835_PLLA_PER 6
+#define BCM2835_PLLB_ARM 7
+#define BCM2835_PLLC_CORE0 8
+#define BCM2835_PLLC_CORE1 9
+#define BCM2835_PLLC_CORE2 10
+#define BCM2835_PLLC_PER 11
+#define BCM2835_PLLD_CORE 12
+#define BCM2835_PLLD_PER 13
+#define BCM2835_PLLH_RCAL 14
+#define BCM2835_PLLH_AUX 15
+#define BCM2835_PLLH_PIX 16
+
+#define BCM2835_CLOCK_TIMER 17
+#define BCM2835_CLOCK_OTP 18
+#define BCM2835_CLOCK_UART 19
+#define BCM2835_CLOCK_VPU 20
+#define BCM2835_CLOCK_V3D 21
+#define BCM2835_CLOCK_ISP 22
+#define BCM2835_CLOCK_H264 23
+#define BCM2835_CLOCK_VEC 24
+#define BCM2835_CLOCK_HSM 25
+#define BCM2835_CLOCK_SDRAM 26
+#define BCM2835_CLOCK_TSENS 27
+#define BCM2835_CLOCK_EMMC 28
+#define BCM2835_CLOCK_PERI_IMAGE 29
+
+#define BCM2835_CLOCK_COUNT 30
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
index e33c75a3c09d..10c558611085 100644
--- a/include/dt-bindings/clock/exynos7-clk.h
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -21,7 +21,18 @@
#define ACLK_MSCL_532 8
#define DOUT_SCLK_AUD_PLL 9
#define FOUT_AUD_PLL 10
-#define TOPC_NR_CLK 11
+#define SCLK_AUD_PLL 11
+#define SCLK_MFC_PLL_B 12
+#define SCLK_MFC_PLL_A 13
+#define SCLK_BUS1_PLL_B 14
+#define SCLK_BUS1_PLL_A 15
+#define SCLK_BUS0_PLL_B 16
+#define SCLK_BUS0_PLL_A 17
+#define SCLK_CC_PLL_B 18
+#define SCLK_CC_PLL_A 19
+#define ACLK_CCORE_133 20
+#define ACLK_PERIS_66 21
+#define TOPC_NR_CLK 22
/* TOP0 */
#define DOUT_ACLK_PERIC1 1
@@ -38,7 +49,9 @@
#define CLK_SCLK_SPDIF 12
#define CLK_SCLK_PCM1 13
#define CLK_SCLK_I2S1 14
-#define TOP0_NR_CLK 15
+#define CLK_ACLK_PERIC0_66 15
+#define CLK_ACLK_PERIC1_66 16
+#define TOP0_NR_CLK 17
/* TOP1 */
#define DOUT_ACLK_FSYS1_200 1
@@ -49,7 +62,16 @@
#define CLK_SCLK_MMC2 6
#define CLK_SCLK_MMC1 7
#define CLK_SCLK_MMC0 8
-#define TOP1_NR_CLK 9
+#define CLK_ACLK_FSYS0_200 9
+#define CLK_ACLK_FSYS1_200 10
+#define CLK_SCLK_PHY_FSYS1 11
+#define CLK_SCLK_PHY_FSYS1_26M 12
+#define MOUT_SCLK_UFSUNIPRO20 13
+#define DOUT_SCLK_UFSUNIPRO20 14
+#define CLK_SCLK_UFSUNIPRO20 15
+#define DOUT_SCLK_PHY_FSYS1 16
+#define DOUT_SCLK_PHY_FSYS1_26M 17
+#define TOP1_NR_CLK 18
/* CCORE */
#define PCLK_RTC 1
@@ -124,7 +146,20 @@
/* FSYS1 */
#define ACLK_MMC1 1
#define ACLK_MMC0 2
-#define FSYS1_NR_CLK 3
+#define PHYCLK_UFS20_TX0_SYMBOL 3
+#define PHYCLK_UFS20_RX0_SYMBOL 4
+#define PHYCLK_UFS20_RX1_SYMBOL 5
+#define ACLK_UFS20_LINK 6
+#define SCLK_UFSUNIPRO20_USER 7
+#define PHYCLK_UFS20_RX1_SYMBOL_USER 8
+#define PHYCLK_UFS20_RX0_SYMBOL_USER 9
+#define PHYCLK_UFS20_TX0_SYMBOL_USER 10
+#define OSCCLK_PHY_CLKOUT_EMBEDDED_COMBO_PHY 11
+#define SCLK_COMBO_PHY_EMBEDDED_26M 12
+#define DOUT_PCLK_FSYS1 13
+#define PCLK_GPIO_FSYS1 14
+#define MOUT_FSYS1_PHYCLK_SEL1 15
+#define FSYS1_NR_CLK 16
/* MSCL */
#define USERMUX_ACLK_MSCL_532 1
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 8de173ff19f3..77985cc43316 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -254,6 +254,7 @@
#define IMX6QDL_CLK_CAAM_MEM 241
#define IMX6QDL_CLK_CAAM_ACLK 242
#define IMX6QDL_CLK_CAAM_IPG 243
-#define IMX6QDL_CLK_END 244
+#define IMX6QDL_CLK_SPDIF_GCLK 244
+#define IMX6QDL_CLK_END 245
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index 9ce4e421096f..e14573e293c5 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -174,6 +174,7 @@
#define IMX6SL_CLK_SSI1_IPG 161
#define IMX6SL_CLK_SSI2_IPG 162
#define IMX6SL_CLK_SSI3_IPG 163
-#define IMX6SL_CLK_END 164
+#define IMX6SL_CLK_SPDIF_GCLK 164
+#define IMX6SL_CLK_END 165
#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index 995709119ec5..36f0324902a5 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -274,6 +274,7 @@
#define IMX6SX_PLL5_BYPASS 261
#define IMX6SX_PLL6_BYPASS 262
#define IMX6SX_PLL7_BYPASS 263
-#define IMX6SX_CLK_CLK_END 264
+#define IMX6SX_CLK_SPDIF_GCLK 264
+#define IMX6SX_CLK_CLK_END 265
#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index 728df28b00d5..a4a7a9ce3457 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -446,5 +446,6 @@
#define IMX7D_MU_ROOT_CLK 433
#define IMX7D_SEMA4_HS_ROOT_CLK 434
#define IMX7D_PLL_DRAM_TEST_DIV 435
-#define IMX7D_CLK_END 436
+#define IMX7D_ADC_ROOT_CLK 436
+#define IMX7D_CLK_END 437
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 4ad76ed882ad..7956ba1bc974 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -18,7 +18,6 @@
/* TOPCKGEN */
#define CLK_TOP_CLKPH_MCK_O 1
-#define CLK_TOP_DPI 2
#define CLK_TOP_USB_SYSPLL_125M 3
#define CLK_TOP_HDMITX_DIG_CTS 4
#define CLK_TOP_ARMCA7PLL_754M 5
@@ -154,12 +153,16 @@
#define CLK_TOP_I2S2_M_SEL 135
#define CLK_TOP_I2S3_M_SEL 136
#define CLK_TOP_I2S3_B_SEL 137
-#define CLK_TOP_NR_CLK 138
+#define CLK_TOP_DSI0_DIG 138
+#define CLK_TOP_DSI1_DIG 139
+#define CLK_TOP_LVDS_PXL 140
+#define CLK_TOP_LVDS_CTS 141
+#define CLK_TOP_NR_CLK 142
/* APMIXED_SYS */
-#define CLK_APMIXED_ARMCA15PLL 1
-#define CLK_APMIXED_ARMCA7PLL 2
+#define CLK_APMIXED_ARMCA15PLL 1
+#define CLK_APMIXED_ARMCA7PLL 2
#define CLK_APMIXED_MAINPLL 3
#define CLK_APMIXED_UNIVPLL 4
#define CLK_APMIXED_MMPLL 5
@@ -172,7 +175,8 @@
#define CLK_APMIXED_APLL2 12
#define CLK_APMIXED_LVDSPLL 13
#define CLK_APMIXED_MSDCPLL2 14
-#define CLK_APMIXED_NR_CLK 15
+#define CLK_APMIXED_REF2USB_TX 15
+#define CLK_APMIXED_NR_CLK 16
/* INFRA_SYS */
@@ -187,7 +191,8 @@
#define CLK_INFRA_CEC 9
#define CLK_INFRA_PMICSPI 10
#define CLK_INFRA_PMICWRAP 11
-#define CLK_INFRA_NR_CLK 12
+#define CLK_INFRA_CLK_13M 12
+#define CLK_INFRA_NR_CLK 13
/* PERI_SYS */
@@ -232,4 +237,91 @@
#define CLK_PERI_UART3_SEL 39
#define CLK_PERI_NR_CLK 40
+/* IMG_SYS */
+
+#define CLK_IMG_LARB2_SMI 1
+#define CLK_IMG_CAM_SMI 2
+#define CLK_IMG_CAM_CAM 3
+#define CLK_IMG_SEN_TG 4
+#define CLK_IMG_SEN_CAM 5
+#define CLK_IMG_CAM_SV 6
+#define CLK_IMG_FD 7
+#define CLK_IMG_NR_CLK 8
+
+/* MM_SYS */
+
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_CAM_MDP 3
+#define CLK_MM_MDP_RDMA0 4
+#define CLK_MM_MDP_RDMA1 5
+#define CLK_MM_MDP_RSZ0 6
+#define CLK_MM_MDP_RSZ1 7
+#define CLK_MM_MDP_RSZ2 8
+#define CLK_MM_MDP_TDSHP0 9
+#define CLK_MM_MDP_TDSHP1 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_SPLIT1 29
+#define CLK_MM_DISP_MERGE 30
+#define CLK_MM_DISP_OD 31
+#define CLK_MM_DISP_PWM0MM 32
+#define CLK_MM_DISP_PWM026M 33
+#define CLK_MM_DISP_PWM1MM 34
+#define CLK_MM_DISP_PWM126M 35
+#define CLK_MM_DSI0_ENGINE 36
+#define CLK_MM_DSI0_DIGITAL 37
+#define CLK_MM_DSI1_ENGINE 38
+#define CLK_MM_DSI1_DIGITAL 39
+#define CLK_MM_DPI_PIXEL 40
+#define CLK_MM_DPI_ENGINE 41
+#define CLK_MM_DPI1_PIXEL 42
+#define CLK_MM_DPI1_ENGINE 43
+#define CLK_MM_HDMI_PIXEL 44
+#define CLK_MM_HDMI_PLLCK 45
+#define CLK_MM_HDMI_AUDIO 46
+#define CLK_MM_HDMI_SPDIF 47
+#define CLK_MM_LVDS_PIXEL 48
+#define CLK_MM_LVDS_CTS 49
+#define CLK_MM_SMI_LARB4 50
+#define CLK_MM_HDMI_HDCP 51
+#define CLK_MM_HDMI_HDCP24M 52
+#define CLK_MM_NR_CLK 53
+
+/* VDEC_SYS */
+
+#define CLK_VDEC_CKEN 1
+#define CLK_VDEC_LARB_CKEN 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VENC_SYS */
+
+#define CLK_VENC_CKE0 1
+#define CLK_VENC_CKE1 2
+#define CLK_VENC_CKE2 3
+#define CLK_VENC_CKE3 4
+#define CLK_VENC_NR_CLK 5
+
+/* VENCLT_SYS */
+
+#define CLK_VENCLT_CKE0 1
+#define CLK_VENCLT_CKE1 2
+#define CLK_VENCLT_NR_CLK 3
+
#endif /* _DT_BINDINGS_CLK_MT8173_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
index 2c0da566c46a..5aa7ebeae411 100644
--- a/include/dt-bindings/clock/qcom,gcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h
@@ -348,4 +348,10 @@
#define GCC_PCIE_1_PIPE_CLK 331
#define GCC_PCIE_1_SLV_AXI_CLK 332
+/* gdscs */
+#define USB_HS_HSIC_GDSC 0
+#define PCIE0_GDSC 1
+#define PCIE1_GDSC 2
+#define USB30_GDSC 3
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h
index e430f644dd6c..257e2fbedd94 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8916.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h
@@ -152,5 +152,35 @@
#define GCC_VENUS0_AHB_CLK 135
#define GCC_VENUS0_AXI_CLK 136
#define GCC_VENUS0_VCODEC0_CLK 137
+#define BIMC_DDR_CLK_SRC 138
+#define GCC_APSS_TCU_CLK 139
+#define GCC_GFX_TCU_CLK 140
+#define BIMC_GPU_CLK_SRC 141
+#define GCC_BIMC_GFX_CLK 142
+#define GCC_BIMC_GPU_CLK 143
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146
+#define ULTAUDIO_XO_CLK_SRC 147
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 148
+#define CODEC_DIGCODEC_CLK_SRC 149
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152
+#define GCC_ULTAUDIO_STC_XO_CLK 153
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158
+#define GCC_CODEC_DIGCODEC_CLK 159
+
+/* Indexes for GDSCs */
+#define BIMC_GDSC 0
+#define VENUS_GDSC 1
+#define MDSS_GDSC 2
+#define JPEG_GDSC 3
+#define VFE_GDSC 4
+#define OXILI_GDSC 5
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8974.h b/include/dt-bindings/clock/qcom,gcc-msm8974.h
index 51e51c860fe6..81d32f639190 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8974.h
@@ -321,4 +321,7 @@
#define GCC_SDCC1_CDCCAL_SLEEP_CLK 304
#define GCC_SDCC1_CDCCAL_FF_CLK 305
+/* gdscs */
+#define USB_HS_HSIC_GDSC 0
+
#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
index d72b5b35f15e..03861e3f498e 100644
--- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -180,4 +180,14 @@
#define VPU_SLEEP_CLK 163
#define VPU_VDP_CLK 164
+/* GDSCs */
+#define VENUS0_GDSC 0
+#define VENUS0_CORE0_GDSC 1
+#define VENUS0_CORE1_GDSC 2
+#define MDSS_GDSC 3
+#define CAMSS_JPEG_GDSC 4
+#define CAMSS_VFE_GDSC 5
+#define OXILI_GDSC 6
+#define OXILICX_GDSC 7
+
#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index 032ed87ef0f3..28651e54c9ae 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -158,4 +158,12 @@
#define SPDM_RM_AXI 141
#define SPDM_RM_OCMEMNOC 142
+/* gdscs */
+#define VENUS0_GDSC 0
+#define MDSS_GDSC 1
+#define CAMSS_JPEG_GDSC 2
+#define CAMSS_VFE_GDSC 3
+#define OXILI_GDSC 4
+#define OXILICX_GDSC 5
+
#endif
diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
new file mode 100644
index 000000000000..e864aae0a256
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7795 CPG Core Clocks */
+#define R8A7795_CLK_Z 0
+#define R8A7795_CLK_Z2 1
+#define R8A7795_CLK_ZR 2
+#define R8A7795_CLK_ZG 3
+#define R8A7795_CLK_ZTR 4
+#define R8A7795_CLK_ZTRD2 5
+#define R8A7795_CLK_ZT 6
+#define R8A7795_CLK_ZX 7
+#define R8A7795_CLK_S0D1 8
+#define R8A7795_CLK_S0D4 9
+#define R8A7795_CLK_S1D1 10
+#define R8A7795_CLK_S1D2 11
+#define R8A7795_CLK_S1D4 12
+#define R8A7795_CLK_S2D1 13
+#define R8A7795_CLK_S2D2 14
+#define R8A7795_CLK_S2D4 15
+#define R8A7795_CLK_S3D1 16
+#define R8A7795_CLK_S3D2 17
+#define R8A7795_CLK_S3D4 18
+#define R8A7795_CLK_LB 19
+#define R8A7795_CLK_CL 20
+#define R8A7795_CLK_ZB3 21
+#define R8A7795_CLK_ZB3D2 22
+#define R8A7795_CLK_CR 23
+#define R8A7795_CLK_CRD2 24
+#define R8A7795_CLK_SD0H 25
+#define R8A7795_CLK_SD0 26
+#define R8A7795_CLK_SD1H 27
+#define R8A7795_CLK_SD1 28
+#define R8A7795_CLK_SD2H 29
+#define R8A7795_CLK_SD2 30
+#define R8A7795_CLK_SD3H 31
+#define R8A7795_CLK_SD3 32
+#define R8A7795_CLK_SSP2 33
+#define R8A7795_CLK_SSP1 34
+#define R8A7795_CLK_SSPRS 35
+#define R8A7795_CLK_RPC 36
+#define R8A7795_CLK_RPCD2 37
+#define R8A7795_CLK_MSO 38
+#define R8A7795_CLK_CANFD 39
+#define R8A7795_CLK_HDMI 40
+#define R8A7795_CLK_CSI0 41
+#define R8A7795_CLK_CSIREF 42
+#define R8A7795_CLK_CP 43
+#define R8A7795_CLK_CPEX 44
+#define R8A7795_CLK_R 45
+#define R8A7795_CLK_OSC 46
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h
new file mode 100644
index 000000000000..569a3cc33ffb
--- /dev/null
+++ b/include/dt-bindings/clock/renesas-cpg-mssr.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
+
+#define CPG_CORE 0 /* Core Clock */
+#define CPG_MOD 1 /* Module Clock */
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/sun4i-a10-pll2.h b/include/dt-bindings/clock/sun4i-a10-pll2.h
new file mode 100644
index 000000000000..071c8112d531
--- /dev/null
+++ b/include/dt-bindings/clock/sun4i-a10-pll2.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_
+#define __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_
+
+#define SUN4I_A10_PLL2_1X 0
+#define SUN4I_A10_PLL2_2X 1
+#define SUN4I_A10_PLL2_4X 2
+#define SUN4I_A10_PLL2_8X 3
+
+#endif /* __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ */
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index d19763439472..56c16aaea112 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -194,6 +194,7 @@
#define VF610_PLL7_BYPASS 181
#define VF610_CLK_SNVS 182
#define VF610_CLK_DAP 183
-#define VF610_CLK_END 184
+#define VF610_CLK_OCOTP 184
+#define VF610_CLK_END 185
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 4b840e822209..4915d40d3c3c 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -49,7 +49,7 @@ struct asymmetric_key_subtype {
static inline
struct asymmetric_key_subtype *asymmetric_key_subtype(const struct key *key)
{
- return key->type_data.p[0];
+ return key->payload.data[asym_subtype];
}
#endif /* _KEYS_ASYMMETRIC_SUBTYPE_H */
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index c0754abb2f56..59c1df9cf922 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -19,6 +19,16 @@
extern struct key_type key_type_asymmetric;
/*
+ * The key payload is four words. The asymmetric-type key uses them as
+ * follows:
+ */
+enum asymmetric_payload_bits {
+ asym_crypto,
+ asym_subtype,
+ asym_key_ids,
+};
+
+/*
* Identifiers for an asymmetric key ID. We have three ways of looking up a
* key derived from an X.509 certificate:
*
@@ -58,6 +68,11 @@ extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_1,
const void *val_2,
size_t len_2);
+static inline
+const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
+{
+ return key->payload.data[asym_key_ids];
+}
/*
* The payload is at the discretion of the subtype.
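The enum above pairs naturally with small accessors such as asymmetric_key_subtype() and asymmetric_key_ids(); as an illustrative sketch (not part of this patch), an accessor for the asym_crypto slot would follow the same pattern, returning whatever the subtype chose to store there:

static inline void *asymmetric_key_crypto(const struct key *key)
{
	/* asym_crypto indexes the first of the four payload words */
	return key->payload.data[asym_crypto];
}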
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
index 56f82e5c9975..f91ecd9d1bb1 100644
--- a/include/keys/trusted-type.h
+++ b/include/keys/trusted-type.h
@@ -12,10 +12,12 @@
#include <linux/key.h>
#include <linux/rcupdate.h>
+#include <linux/tpm.h>
#define MIN_KEY_SIZE 32
#define MAX_KEY_SIZE 128
-#define MAX_BLOB_SIZE 320
+#define MAX_BLOB_SIZE 512
+#define MAX_PCRINFO_SIZE 64
struct trusted_key_payload {
struct rcu_head rcu;
@@ -26,6 +28,16 @@ struct trusted_key_payload {
unsigned char blob[MAX_BLOB_SIZE];
};
+struct trusted_key_options {
+ uint16_t keytype;
+ uint32_t keyhandle;
+ unsigned char keyauth[TPM_DIGEST_SIZE];
+ unsigned char blobauth[TPM_DIGEST_SIZE];
+ uint32_t pcrinfo_len;
+ unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+ int pcrlock;
+};
+
extern struct key_type key_type_trusted;
#endif /* _KEYS_TRUSTED_TYPE_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index cebefb069c44..c56fef40f53e 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -15,6 +15,8 @@
#include <linux/key.h>
#include <linux/rcupdate.h>
+#ifdef CONFIG_KEYS
+
/*****************************************************************************/
/*
* the payload for a key of type "user" or "logon"
@@ -46,5 +48,11 @@ extern void user_describe(const struct key *user, struct seq_file *m);
extern long user_read(const struct key *key,
char __user *buffer, size_t buflen);
+static inline const struct user_key_payload *user_key_payload(const struct key *key)
+{
+ return (struct user_key_payload *)rcu_dereference_key(key);
+}
+
+#endif /* CONFIG_KEYS */
#endif /* _KEYS_USER_TYPE_H */
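As a usage sketch (not from this patch): user_key_payload() wraps rcu_dereference_key(), so the caller is expected to hold key->sem while dereferencing the result; the consumer function below is hypothetical.

	const struct user_key_payload *upayload;

	down_read(&key->sem);
	upayload = user_key_payload(key);
	if (upayload)
		consume_bytes(upayload->data, upayload->datalen);	/* hypothetical */
	up_read(&key->sem);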
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index e1e4d7c38dda..1800227af9d6 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -51,7 +51,7 @@ struct arch_timer_cpu {
bool armed;
/* Timer IRQ */
- const struct kvm_irq_level *irq;
+ struct kvm_irq_level irq;
/* VGIC mapping */
struct irq_phys_map *map;
@@ -71,5 +71,7 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
+void kvm_timer_schedule(struct kvm_vcpu *vcpu);
+void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 6a3538ef7275..9c747cb14ad8 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -112,7 +112,6 @@ struct vgic_vmcr {
struct vgic_ops {
struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
- void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
void (*clear_eisr)(struct kvm_vcpu *vcpu);
@@ -159,7 +158,6 @@ struct irq_phys_map {
u32 virt_irq;
u32 phys_irq;
u32 irq;
- bool active;
};
struct irq_phys_map_entry {
@@ -296,22 +294,16 @@ struct vgic_v3_cpu_if {
};
struct vgic_cpu {
- /* per IRQ to LR mapping */
- u8 *vgic_irq_lr_map;
-
/* Pending/active/both interrupts on this VCPU */
- DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
- DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS);
- DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP(active_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP(pend_act_percpu, VGIC_NR_PRIVATE_IRQS);
/* Pending/active/both shared interrupts, dynamically sized */
unsigned long *pending_shared;
unsigned long *active_shared;
unsigned long *pend_act_shared;
- /* Bitmap of used/free list registers */
- DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
-
/* Number of list registers on this CPU */
int nr_lr;
@@ -354,8 +346,6 @@ int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
int virt_irq, int irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b2abc996c25d..20eba1eb0a3c 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -143,7 +143,7 @@ extern void __audit_inode_child(const struct inode *parent,
extern void __audit_seccomp(unsigned long syscall, long signr, int code);
extern void __audit_ptrace(struct task_struct *t);
-static inline int audit_dummy_context(void)
+static inline bool audit_dummy_context(void)
{
void *p = current->audit_context;
return !p || *(int *)p;
@@ -345,9 +345,9 @@ static inline void audit_syscall_entry(int major, unsigned long a0,
{ }
static inline void audit_syscall_exit(void *pt_regs)
{ }
-static inline int audit_dummy_context(void)
+static inline bool audit_dummy_context(void)
{
- return 1;
+ return true;
}
static inline struct filename *audit_reusename(const __user char *name)
{
@@ -457,7 +457,7 @@ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp
extern __printf(2, 3)
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
extern void audit_log_end(struct audit_buffer *ab);
-extern int audit_string_contains_control(const char *string,
+extern bool audit_string_contains_control(const char *string,
size_t len);
extern void audit_log_n_hex(struct audit_buffer *ab,
const unsigned char *buf,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c85f74946a8b..c82794f20110 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
-#include <linux/memcontrol.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -267,8 +266,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
- return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
- cgroup_on_dfl(blkcg_root_css->cgroup) &&
+ return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+ cgroup_subsys_on_dfl(io_cgrp_subsys) &&
bdi_cap_account_dirty(bdi) &&
(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8492721b39be..60d44b26276d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -76,6 +76,7 @@ enum {
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
+ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
@@ -83,6 +84,17 @@ enum {
};
/*
+ * cgroup_file is the handle for a file instance created in a cgroup which
+ * is used, for example, to generate file changed notifications. This can
+ * be obtained by setting cftype->file_offset.
+ */
+struct cgroup_file {
+ /* do not access any fields from outside cgroup core */
+ struct list_head node; /* anchored at css->files */
+ struct kernfs_node *kn;
+};
+
+/*
* Per-subsystem/per-cgroup state maintained by the system. This is the
* fundamental structural building block that controllers deal with.
*
@@ -122,6 +134,9 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
+ /* all cgroup_files associated with this css */
+ struct list_head files;
+
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -196,6 +211,9 @@ struct css_set {
*/
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+ /* all css_task_iters currently walking this cset */
+ struct list_head task_iters;
+
/* For RCU-protected deletion */
struct rcu_head rcu_head;
};
@@ -217,16 +235,16 @@ struct cgroup {
int id;
/*
- * If this cgroup contains any tasks, it contributes one to
- * populated_cnt. All children with non-zero popuplated_cnt of
- * their own contribute one. The count is zero iff there's no task
- * in this cgroup or its subtree.
+ * Each non-empty css_set associated with this cgroup contributes
+	 * one to populated_cnt. All children with non-zero populated_cnt
+ * of their own contribute one. The count is zero iff there's no
+ * task in this cgroup or its subtree.
*/
int populated_cnt;
struct kernfs_node *kn; /* cgroup kernfs entry */
- struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */
- struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
+ struct cgroup_file procs_file; /* handle for "cgroup.procs" */
+ struct cgroup_file events_file; /* handle for "cgroup.events" */
/*
* The bitmask of subsystems enabled on the child cgroups.
@@ -324,11 +342,6 @@ struct cftype {
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
- /*
- * If not 0, file mode is set to this value, otherwise it will
- * be figured out automatically
- */
- umode_t mode;
/*
* The maximum length of string, excluding trailing nul, that can
@@ -340,6 +353,14 @@ struct cftype {
unsigned int flags;
/*
+ * If non-zero, should contain the offset from the start of css to
+ * a struct cgroup_file field. cgroup will record the handle of
+ * the created file into it. The recorded handle can be used as
+ * long as the containing css remains accessible.
+ */
+ unsigned int file_offset;
+
+ /*
* Fields used for internal bookkeeping. Initialized automatically
* during registration.
*/
@@ -414,12 +435,10 @@ struct cgroup_subsys {
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
- void (*exit)(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task);
+ void (*exit)(struct task_struct *task);
+ void (*free)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
- int disabled;
int early_init;
/*
@@ -473,8 +492,31 @@ struct cgroup_subsys {
unsigned int depends_on;
};
-void cgroup_threadgroup_change_begin(struct task_struct *tsk);
-void cgroup_threadgroup_change_end(struct task_struct *tsk);
+extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
+/**
+ * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_begin() and allows cgroup operations to
+ * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ */
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+ percpu_down_read(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_end(). Counterpart of
+ * cgroup_threadgroup_change_begin().
+ */
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+ percpu_up_read(&cgroup_threadgroup_rwsem);
+}
#else /* CONFIG_CGROUPS */
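A minimal sketch of the intended pairing, mirroring what threadgroup_change_begin()/end() already do for their caller; the work in the middle is a placeholder:

	cgroup_threadgroup_change_begin(tsk);
	/* per-threadgroup update that must not race with cgroup migration */
	cgroup_threadgroup_change_end(tsk);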
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index eb7ca55f72ef..22e3754f89c5 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -13,10 +13,10 @@
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
-#include <linux/rwsem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
+#include <linux/jump_label.h>
#include <linux/cgroup-defs.h>
@@ -41,6 +41,10 @@ struct css_task_iter {
struct list_head *task_pos;
struct list_head *tasks_head;
struct list_head *mg_tasks_head;
+
+ struct css_set *cur_cset;
+ struct task_struct *cur_task;
+ struct list_head iters_node; /* css_set->task_iters */
};
extern struct cgroup_root cgrp_dfl_root;
@@ -50,6 +54,26 @@ extern struct css_set init_css_set;
#include <linux/cgroup_subsys.h>
#undef SUBSYS
+#define SUBSYS(_x) \
+ extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
+ extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+/**
+ * cgroup_subsys_enabled - fast test on whether a subsys is enabled
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_enabled(ss) \
+ static_branch_likely(&ss ## _enabled_key)
+
+/**
+ * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_on_dfl(ss) \
+ static_branch_likely(&ss ## _on_dfl_key)
+
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
@@ -78,6 +102,7 @@ extern void cgroup_cancel_fork(struct task_struct *p,
extern void cgroup_post_fork(struct task_struct *p,
void *old_ss_priv[CGROUP_CANFORK_COUNT]);
void cgroup_exit(struct task_struct *p);
+void cgroup_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
@@ -211,11 +236,33 @@ void css_task_iter_end(struct css_task_iter *it);
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
* @tset: taskset to iterate
+ *
+ * @tset may contain multiple tasks and they may belong to multiple
+ * processes. When there are multiple tasks in @tset, if a task of a
+ * process is in @tset, all tasks of the process are in @tset. Also, all
+ * are guaranteed to share the same source and destination csses.
+ *
+ * Iteration is not in any specific order.
*/
#define cgroup_taskset_for_each(task, tset) \
for ((task) = cgroup_taskset_first((tset)); (task); \
(task) = cgroup_taskset_next((tset)))
+/**
+ * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
+ * @leader: the loop cursor
+ * @tset: taskset to iterate
+ *
+ * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
+ * may not contain any.
+ */
+#define cgroup_taskset_for_each_leader(leader, tset) \
+ for ((leader) = cgroup_taskset_first((tset)); (leader); \
+ (leader) = cgroup_taskset_next((tset))) \
+ if ((leader) != (leader)->group_leader) \
+ ; \
+ else
+
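As a sketch of the iteration helpers above, a controller callback would typically walk the taskset like this (foo_attach_task() is an invented per-task hook):

	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		foo_attach_task(task);	/* hypothetical per-task work */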
/*
* Inline functions.
*/
@@ -320,11 +367,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
*/
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
-extern struct rw_semaphore css_set_rwsem;
+extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
lockdep_is_held(&cgroup_mutex) || \
- lockdep_is_held(&css_set_rwsem) || \
+ lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
@@ -412,68 +459,10 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
-/**
- * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
- * @cgrp: the cgroup of interest
- *
- * The default hierarchy is the v2 interface of cgroup and this function
- * can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
- * interface version.
- *
- * The set of behaviors which change on the default hierarchy are still
- * being determined and the mount option is prefixed with __DEVEL__.
- *
- * List of changed behaviors:
- *
- * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
- * and "name" are disallowed.
- *
- * - When mounting an existing superblock, mount options should match.
- *
- * - Remount is disallowed.
- *
- * - rename(2) is disallowed.
- *
- * - "tasks" is removed. Everything should be at process granularity. Use
- * "cgroup.procs" instead.
- *
- * - "cgroup.procs" is not sorted. pids will be unique unless they got
- * recycled inbetween reads.
- *
- * - "release_agent" and "notify_on_release" are removed. Replacement
- * notification mechanism will be implemented.
- *
- * - "cgroup.clone_children" is removed.
- *
- * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
- * and its descendants contain no task; otherwise, 1. The file also
- * generates kernfs notification which can be monitored through poll and
- * [di]notify when the value of the file changes.
- *
- * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
- * take masks of ancestors with non-empty cpus/mems, instead of being
- * moved to an ancestor.
- *
- * - cpuset: a task can be moved into an empty cpuset, and again it takes
- * masks of ancestors.
- *
- * - memcg: use_hierarchy is on by default and the cgroup file for the flag
- * is not created.
- *
- * - blkcg: blk-throttle becomes properly hierarchical.
- *
- * - debug: disallowed on the default hierarchy.
- */
-static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
-{
- return cgrp->root == &cgrp_dfl_root;
-}
-
/* no synchronization, the result can only be used as a hint */
-static inline bool cgroup_has_tasks(struct cgroup *cgrp)
+static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
- return !list_empty(&cgrp->cset_links);
+ return cgrp->populated_cnt;
}
/* returns ino associated with a cgroup */
@@ -527,6 +516,19 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
+/**
+ * cgroup_file_notify - generate a file modified event for a cgroup_file
+ * @cfile: target cgroup_file
+ *
+ * @cfile must have been obtained by setting cftype->file_offset.
+ */
+static inline void cgroup_file_notify(struct cgroup_file *cfile)
+{
+ /* might not have been created due to one of the CFTYPE selector flags */
+ if (cfile->kn)
+ kernfs_notify(cfile->kn);
+}
+
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -546,6 +548,7 @@ static inline void cgroup_cancel_fork(struct task_struct *p,
static inline void cgroup_post_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
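To show how cftype->file_offset and cgroup_file_notify() fit together, a hedged sketch of a controller embedding a cgroup_file handle (all foo_* names are invented for the example):

struct foo_css {
	struct cgroup_subsys_state css;
	struct cgroup_file events_file;		/* filled in by cgroup core */
};

static struct cftype foo_files[] = {
	{
		.name = "events",
		.seq_show = foo_events_show,	/* hypothetical show routine */
		.file_offset = offsetof(struct foo_css, events_file),
	},
	{ }	/* terminate */
};

static void foo_state_changed(struct foo_css *foo)
{
	/* wakes poll/inotify waiters on the "events" file, if it was created */
	cgroup_file_notify(&foo->events_file);
}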
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 3ecc07d0da77..c56988ac63f7 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -500,13 +500,14 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
*
* Clock with adjustable fractional divider affecting its output frequency.
*/
-
struct clk_fractional_divider {
struct clk_hw hw;
void __iomem *reg;
u8 mshift;
+ u8 mwidth;
u32 mmask;
u8 nshift;
+ u8 nwidth;
u32 nmask;
u8 flags;
spinlock_t *lock;
@@ -518,6 +519,41 @@ struct clk *clk_register_fractional_divider(struct device *dev,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
u8 clk_divider_flags, spinlock_t *lock);
+/**
+ * struct clk_multiplier - adjustable multiplier clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the multiplier
+ * @shift: shift to the multiplier bit field
+ * @width: width of the multiplier bit field
+ * @lock: register lock
+ *
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+ * Flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ * from the register, with 0 being a valid value effectively
+ * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+ * set, then a null multiplier will be considered as a bypass,
+ * leaving the parent rate unmodified.
+ * CLK_MULTIPLIER_ROUND_CLOSEST - Round the computed multiplier to the
+ * closest integer instead of rounding it down.
+ */
+struct clk_multiplier {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
+
+extern const struct clk_ops clk_multiplier_ops;
+
/***
* struct clk_composite - aggregate clock of mux, divider and gate clocks
*
@@ -606,7 +642,7 @@ void clk_unregister(struct clk *clk);
void devm_clk_unregister(struct device *dev, struct clk *clk);
/* helper functions */
-const char *__clk_get_name(struct clk *clk);
+const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
struct clk_hw *__clk_get_hw(struct clk *clk);
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
@@ -618,6 +654,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
bool clk_hw_is_prepared(const struct clk_hw *hw);
+bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
int __clk_mux_determine_rate(struct clk_hw *hw,
@@ -690,6 +727,15 @@ static inline struct clk *of_clk_src_onecell_get(
{
return ERR_PTR(-ENOENT);
}
+static inline int of_clk_get_parent_count(struct device_node *np)
+{
+ return 0;
+}
+static inline int of_clk_parent_fill(struct device_node *np,
+ const char **parents, unsigned int size)
+{
+ return 0;
+}
static inline const char *of_clk_get_parent_name(struct device_node *np,
int index)
{
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 7669f7618f39..1e6932222e11 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -164,6 +164,7 @@ extern void __iomem *at91_pmc_base;
#define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */
#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
+#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
@@ -182,13 +183,18 @@ extern void __iomem *at91_pmc_base;
#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
-#define AT91_PMC_PCR_PID (0x3f << 0) /* Peripheral ID */
-#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
-#define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */
-#define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */
-#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */
-#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */
-#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */
-#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
+#define AT91_PMC_PCR_PID_MASK 0x3f
+#define AT91_PMC_PCR_GCKCSS_OFFSET 8
+#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET)
+#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */
+#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
+#define AT91_PMC_PCR_DIV_OFFSET 16
+#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET)
+#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */
+#define AT91_PMC_PCR_GCKDIV_OFFSET 20
+#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET)
+#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */
+#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
+#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
#endif
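For illustration, the new field macros compose into a single PMC_PCR write that selects a source and divisor for a generated clock and enables it; the peripheral id, source and divisor values below are made up:

	u32 pcr = (pid & AT91_PMC_PCR_PID_MASK) |
		  AT91_PMC_PCR_GCKCSS(css) |	/* clock source selection */
		  AT91_PMC_PCR_GCKDIV(gckdiv) |	/* generated clock divisor */
		  AT91_PMC_PCR_CMD |		/* write access */
		  AT91_PMC_PCR_GCKEN;

	writel(pcr, at91_pmc_base + AT91_PMC_PCR);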
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
new file mode 100644
index 000000000000..fc481037478a
--- /dev/null
+++ b/include/linux/dma-iommu.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014-2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_IOMMU_H
+#define __DMA_IOMMU_H
+
+#ifdef __KERNEL__
+#include <asm/errno.h>
+
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/iommu.h>
+
+int iommu_dma_init(void);
+
+/* Domain management interface for IOMMU drivers */
+int iommu_get_dma_cookie(struct iommu_domain *domain);
+void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+/* Setup call for arch DMA mapping code */
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+
+/* General helpers for DMA-API <-> IOMMU-API interaction */
+int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+
+/*
+ * These implement the bulk of the relevant DMA mapping callbacks, but require
+ * the arch code to take care of attributes and cache maintenance
+ */
+struct page **iommu_dma_alloc(struct device *dev, size_t size,
+ gfp_t gfp, int prot, dma_addr_t *handle,
+ void (*flush_page)(struct device *, const void *, phys_addr_t));
+void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
+ dma_addr_t *handle);
+
+int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, int prot);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int prot);
+
+/*
+ * Arch code with no special attribute handling may use these
+ * directly as DMA mapping callbacks for simplicity
+ */
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+int iommu_dma_supported(struct device *dev, u64 mask);
+int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#else
+
+struct iommu_domain;
+
+static inline int iommu_dma_init(void)
+{
+ return 0;
+}
+
+static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+#endif /* __KERNEL__ */
+#endif /* __DMA_IOMMU_H */
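Based on the declarations above, the expected arch-side setup sequence is roughly the following sketch, with base and size chosen by the caller:

	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);

	if (domain && !iommu_get_dma_cookie(domain)) {
		/* set up an IOVA space of 'size' bytes starting at 'base' */
		if (iommu_dma_init_domain(domain, base, size))
			iommu_put_dma_cookie(domain);
	}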
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 7ac17f57250e..187c10299722 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -20,6 +20,14 @@
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
+/* Extended context entry types */
+#define CONTEXT_TT_PT_PASID 4
+#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
+#define CONTEXT_TT_MASK (7ULL << 2)
+
+#define CONTEXT_DINVE (1ULL << 8)
+#define CONTEXT_PRS (1ULL << 9)
+#define CONTEXT_PASIDE (1ULL << 11)
struct intel_iommu;
struct dmar_domain;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9355f377fd46..9a1cb8c605e0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1053,12 +1053,11 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
extern int posix_unblock_lock(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1144,12 +1143,6 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_lock_inode_wait(struct inode *inode,
- struct file_lock *fl)
-{
- return -ENOLCK;
-}
-
static inline int posix_unblock_lock(struct file_lock *waiter)
{
return -ENOENT;
@@ -1171,8 +1164,7 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
return 0;
}
-static inline int flock_lock_inode_wait(struct inode *inode,
- struct file_lock *request)
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
return -ENOLCK;
}
@@ -1215,14 +1207,9 @@ static inline struct inode *file_inode(const struct file *f)
return f->f_inode;
}
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
-{
- return posix_lock_inode_wait(file_inode(filp), fl);
-}
-
-static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
- return flock_lock_inode_wait(file_inode(filp), fl);
+ return locks_lock_inode_wait(file_inode(filp), fl);
}
struct fasync_struct {
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index bcc853eccc85..7edd30515298 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -48,9 +48,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
static inline bool hugetlb_cgroup_disabled(void)
{
- if (hugetlb_cgrp_subsys.disabled)
- return true;
- return false;
+ return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 54733d5b503e..8fdc17b84739 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -26,6 +26,7 @@
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
+#include <uapi/asm/hyperv.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 810a34f60424..1c1ff7e4faa4 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,13 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-#ifdef CONFIG_CGROUPS
-#define INIT_GROUP_RWSEM(sig) \
- .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
-#else
-#define INIT_GROUP_RWSEM(sig)
-#endif
-
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -65,7 +58,6 @@ extern struct fs_struct init_fs;
INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
- INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 6240063bdcac..821273ca4873 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -1,5 +1,9 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2015, Intel Corporation.
+ *
+ * Authors: Ashok Raj <ashok.raj@intel.com>
+ * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * David Woodhouse <David.Woodhouse@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -13,10 +17,6 @@
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
#ifndef _INTEL_IOMMU_H_
@@ -25,7 +25,10 @@
#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
+#include <linux/idr.h>
#include <linux/dma_remapping.h>
+#include <linux/mmu_notifier.h>
+#include <linux/list.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -57,16 +60,21 @@
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
+#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
+#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
+#define DMAR_PRS_REG 0xdc /* Page request status register */
+#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
+#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
+#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
+#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
#define OFFSET_STRIDE (9)
-/*
-#define dmar_readl(dmar, reg) readl(dmar + reg)
-#define dmar_readq(dmar, reg) ({ \
- u32 lo, hi; \
- lo = readl(dmar + reg); \
- hi = readl(dmar + reg + 4); \
- (((u64) hi) << 32) + lo; })
-*/
+
+#ifdef CONFIG_64BIT
+#define dmar_readq(a) readq(a)
+#define dmar_writeq(a,v) writeq(v,a)
+#else
static inline u64 dmar_readq(void __iomem *addr)
{
u32 lo, hi;
@@ -80,6 +88,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
writel((u32)val, addr);
writel((u32)(val >> 32), addr + 4);
}
+#endif
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
@@ -123,7 +132,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
-/* PASID support used to be on bit 28 */
+#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
@@ -253,6 +262,11 @@ enum {
#define QI_DIOTLB_TYPE 0x3
#define QI_IEC_TYPE 0x4
#define QI_IWD_TYPE 0x5
+#define QI_EIOTLB_TYPE 0x6
+#define QI_PC_TYPE 0x7
+#define QI_DEIOTLB_TYPE 0x8
+#define QI_PGRP_RESP_TYPE 0x9
+#define QI_PSTRM_RESP_TYPE 0xa
#define QI_IEC_SELECTIVE (((u64)1) << 4)
#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
@@ -280,6 +294,53 @@ enum {
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
+#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
+#define QI_PC_DID(did) (((u64)did) << 16)
+#define QI_PC_GRAN(gran) (((u64)gran) << 4)
+
+#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
+#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
+
+#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
+#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_EIOTLB_AM(am) (((u64)am))
+#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
+#define QI_EIOTLB_DID(did) (((u64)did) << 16)
+#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+
+#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
+#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
+#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
+#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
+#define QI_DEV_EIOTLB_MAX_INVS 32
+
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
+#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
+#define QI_PGRP_RESP_CODE(res) ((u64)(res))
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+#define QI_PGRP_DID(did) (((u64)(did)) << 16)
+#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+
+#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
+#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
+#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
+#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
+#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
+#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
+#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
+
+#define QI_RESP_SUCCESS 0x0
+#define QI_RESP_INVALID 0x1
+#define QI_RESP_FAILURE 0xf
+
+#define QI_GRAN_ALL_ALL 0
+#define QI_GRAN_NONG_ALL 1
+#define QI_GRAN_NONG_PASID 2
+#define QI_GRAN_PSI_PASID 3
+
struct qi_desc {
u64 low, high;
};
@@ -327,6 +388,10 @@ enum {
#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+struct pasid_entry;
+struct pasid_state_entry;
+struct page_req_dsc;
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -338,7 +403,7 @@ struct intel_iommu {
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
- unsigned int irq;
+ unsigned int irq, pr_irq;
u16 segment; /* PCI segment# */
unsigned char name[13]; /* Device Name */
@@ -350,6 +415,18 @@ struct intel_iommu {
struct iommu_flush flush;
#endif
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ /* These are large and need to be contiguous, so we allocate just
+ * one for now. We'll maybe want to rethink that if we truly give
+ * devices away to userspace processes (e.g. for DPDK) and don't
+ * want to trust that userspace will use *only* the PASID it was
+ * told to. But while it's all driver-arbitrated, we're fine. */
+ struct pasid_entry *pasid_table;
+ struct pasid_state_entry *pasid_state_table;
+ struct page_req_dsc *prq;
+ unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct idr pasid_idr;
+#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -389,6 +466,38 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
+extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
+extern int intel_svm_enable_prq(struct intel_iommu *iommu);
+extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+
+struct svm_dev_ops;
+
+struct intel_svm_dev {
+ struct list_head list;
+ struct rcu_head rcu;
+ struct device *dev;
+ struct svm_dev_ops *ops;
+ int users;
+ u16 did;
+ u16 dev_iotlb:1;
+ u16 sid, qdep;
+};
+
+struct intel_svm {
+ struct mmu_notifier notifier;
+ struct mm_struct *mm;
+ struct intel_iommu *iommu;
+ int flags;
+ int pasid;
+ struct list_head devs;
+};
+
+extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
+extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#endif
+
extern const struct attribute_group *intel_iommu_groups[];
#endif
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
new file mode 100644
index 000000000000..3c25794042f9
--- /dev/null
+++ b/include/linux/intel-svm.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright © 2015 Intel Corporation.
+ *
+ * Authors: David Woodhouse <David.Woodhouse@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INTEL_SVM_H__
+#define __INTEL_SVM_H__
+
+struct device;
+
+struct svm_dev_ops {
+ void (*fault_cb)(struct device *dev, int pasid, u64 address,
+ u32 private, int rwxp, int response);
+};
+
+/* Values for rwxp in fault_cb callback */
+#define SVM_REQ_READ (1<<3)
+#define SVM_REQ_WRITE (1<<2)
+#define SVM_REQ_EXEC (1<<1)
+#define SVM_REQ_PRIV (1<<0)
+
+
+/*
+ * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
+ * PASID for the current process. Even if a PASID already exists, a new one
+ * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID
+ * will not be given to subsequent callers. This facility allows a driver to
+ * disambiguate between multiple device contexts which access the same MM,
+ * if there is no other way to do so. It should be used sparingly, if at all.
+ */
+#define SVM_FLAG_PRIVATE_PASID (1<<0)
+
+/*
+ * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
+ * for access to kernel addresses. No IOTLB flushes are automatically done
+ * for kernel mappings; it is valid only for access to the kernel's static
+ * 1:1 mapping of physical memory — not to vmalloc or even module mappings.
+ * A future API addition may permit the use of such ranges, by means of an
+ * explicit IOTLB flush call (akin to the DMA API's unmap method).
+ *
+ * It is unlikely that we will ever hook into flush_tlb_kernel_range() to
+ * do such IOTLB flushes automatically.
+ */
+#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+
+/**
+ * intel_svm_bind_mm() - Bind the current process to a PASID
+ * @dev: Device to be granted access
+ * @pasid: Address for allocated PASID
+ * @flags: Flags. Later for requesting supervisor mode, etc.
+ * @ops: Callbacks to device driver
+ *
+ * This function attempts to enable PASID support for the given device.
+ * If the @pasid argument is non-%NULL, a PASID is allocated for access
+ * to the MM of the current process.
+ *
+ * By using a %NULL value for the @pasid argument, this function can
+ * be used to simply validate that PASID support is available for the
+ * given device — i.e. that it is behind an IOMMU which has the
+ * requisite support, and is enabled.
+ *
+ * Page faults are handled transparently by the IOMMU code, and there
+ * should be no need for the device driver to be involved. If a page
+ * fault cannot be handled (i.e. is an invalid address rather than
+ * just needs paging in), then the page request will be completed by
+ * the core IOMMU code with appropriate status, and the device itself
+ * can then report the resulting fault to its driver via whatever
+ * mechanism is appropriate.
+ *
+ * Multiple calls from the same process may result in the same PASID
+ * being re-used. A reference count is kept.
+ */
+extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
+ struct svm_dev_ops *ops);
+
+/**
+ * intel_svm_unbind_mm() - Unbind a specified PASID
+ * @dev: Device for which PASID was allocated
+ * @pasid: PASID value to be unbound
+ *
+ * This function allows a PASID to be retired when the device no
+ * longer requires access to the address space of a given process.
+ *
+ * If the use count for the PASID in question reaches zero, the
+ * PASID is revoked and may no longer be used by hardware.
+ *
+ * Device drivers are required to ensure that no access (including
+ * page requests) is currently outstanding for the PASID in question,
+ * before calling this function.
+ */
+extern int intel_svm_unbind_mm(struct device *dev, int pasid);
+
+#else /* CONFIG_INTEL_IOMMU_SVM */
+
+static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
+ int flags, struct svm_dev_ops *ops)
+{
+ return -ENOSYS;
+}
+
+static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
+{
+ BUG();
+}
+#endif /* CONFIG_INTEL_IOMMU_SVM */
+
+#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+
+#endif /* __INTEL_SVM_H__ */
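Putting the documented API together, a driver-side usage sketch (the device programming step is a placeholder):

	int pasid, ret;

	ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
	if (!ret) {
		/* program 'pasid' into the device; page faults are handled
		 * transparently by the IOMMU code */
		intel_svm_unbind_mm(dev, pasid);
	}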
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
index bbced83b32ee..376a27c9cc6a 100644
--- a/include/linux/iommu-common.h
+++ b/include/linux/iommu-common.h
@@ -7,6 +7,7 @@
#define IOMMU_POOL_HASHBITS 4
#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+#define IOMMU_ERROR_CODE (~(unsigned long) 0)
struct iommu_pool {
unsigned long start;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f9c1b6d0f2e4..f28dff313b07 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -81,6 +81,7 @@ struct iommu_domain {
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
+ void *iova_cookie;
};
enum iommu_cap {
@@ -167,7 +168,7 @@ struct iommu_ops {
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
- int (*device_group)(struct device *dev, unsigned int *groupid);
+ struct iommu_group *(*device_group)(struct device *dev);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
int (*domain_set_attr)(struct iommu_domain *domain,
@@ -316,6 +317,11 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return domain->ops->map_sg(domain, iova, sg, nents, prot);
}
+/* PCI device grouping function */
+extern struct iommu_group *pci_device_group(struct device *dev);
+/* Generic device grouping function */
+extern struct iommu_group *generic_device_group(struct device *dev);
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
new file mode 100644
index 000000000000..1551b5b2f4c2
--- /dev/null
+++ b/include/linux/irqbypass.h
@@ -0,0 +1,90 @@
+/*
+ * IRQ offload/bypass manager
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ * Copyright (c) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef IRQBYPASS_H
+#define IRQBYPASS_H
+
+#include <linux/list.h>
+
+struct irq_bypass_consumer;
+
+/*
+ * Theory of operation
+ *
+ * The IRQ bypass manager is a simple set of lists and callbacks that allows
+ * IRQ producers (ex. physical interrupt sources) to be matched to IRQ
+ * consumers (ex. virtualization hardware that allows IRQ bypass or offload)
+ * via a shared token (ex. eventfd_ctx). Producers and consumers register
+ * independently. When a token match is found, the optional @stop callback
+ * will be called for each participant. The pair will then be connected via
+ * the @add_* callbacks, and finally the optional @start callback will allow
+ * any final coordination. When either participant is unregistered, the
+ * process is repeated using the @del_* callbacks in place of the @add_*
+ * callbacks. Match tokens must be unique per producer/consumer; 1:N pairings
+ * are not supported.
+ */
+
+/**
+ * struct irq_bypass_producer - IRQ bypass producer definition
+ * @node: IRQ bypass manager private list management
+ * @token: opaque token to match between producer and consumer
+ * @irq: Linux IRQ number for the producer device
+ * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
+ * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
+ * @stop: Perform any quiesce operations necessary prior to add/del (optional)
+ * @start: Perform any startup operations necessary after add/del (optional)
+ *
+ * The IRQ bypass producer structure represents an interrupt source for
+ * participation in possible host bypass, for instance an interrupt vector
+ * for a physical device assigned to a VM.
+ */
+struct irq_bypass_producer {
+ struct list_head node;
+ void *token;
+ int irq;
+ int (*add_consumer)(struct irq_bypass_producer *,
+ struct irq_bypass_consumer *);
+ void (*del_consumer)(struct irq_bypass_producer *,
+ struct irq_bypass_consumer *);
+ void (*stop)(struct irq_bypass_producer *);
+ void (*start)(struct irq_bypass_producer *);
+};
+
+/**
+ * struct irq_bypass_consumer - IRQ bypass consumer definition
+ * @node: IRQ bypass manager private list management
+ * @token: opaque token to match between producer and consumer
+ * @add_producer: Connect the IRQ consumer to an IRQ producer
+ * @del_producer: Disconnect the IRQ consumer from an IRQ producer
+ * @stop: Perform any quiesce operations necessary prior to add/del (optional)
+ * @start: Perform any startup operations necessary after add/del (optional)
+ *
+ * The IRQ bypass consumer structure represents an interrupt sink for
+ * participation in possible host bypass, for instance a hypervisor may
+ * support offloads to allow bypassing the host entirely or offload
+ * portions of the interrupt handling to the VM.
+ */
+struct irq_bypass_consumer {
+ struct list_head node;
+ void *token;
+ int (*add_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ void (*del_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ void (*stop)(struct irq_bypass_consumer *);
+ void (*start)(struct irq_bypass_consumer *);
+};
+
+int irq_bypass_register_producer(struct irq_bypass_producer *);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+
+#endif /* IRQBYPASS_H */
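Following the theory-of-operation comment, a sketch of a producer and consumer rendezvousing on a shared token; the token and callbacks are placeholders:

static struct irq_bypass_producer producer;
static struct irq_bypass_consumer consumer = {
	.add_producer	= my_add_producer,	/* hypothetical callbacks */
	.del_producer	= my_del_producer,
};

	producer.token = shared_token;	/* e.g. an eventfd_ctx pointer */
	producer.irq   = host_irq;
	consumer.token = shared_token;	/* must match the producer's token */

	/* registration order does not matter; callbacks run once the tokens match */
	irq_bypass_register_producer(&producer);
	irq_bypass_register_consumer(&consumer);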
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f1094238ab2a..8dde55974f18 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -214,11 +214,6 @@ static inline int jump_label_apply_nops(struct module *mod)
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
-static inline bool static_key_enabled(struct static_key *key)
-{
- return static_key_count(key) > 0;
-}
-
static inline void static_key_enable(struct static_key *key)
{
int count = static_key_count(key);
@@ -265,6 +260,17 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+extern bool ____wrong_branch_error(void);
+
+#define static_key_enabled(x) \
+({ \
+ if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \
+ !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
+ !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ ____wrong_branch_error(); \
+ static_key_count((struct static_key *)x) > 0; \
+})
+
#ifdef HAVE_JUMP_LABEL
/*
@@ -323,8 +329,6 @@ struct static_key_false {
* See jump_label_type() / jump_label_init_type().
*/
-extern bool ____wrong_branch_error(void);
-
#define static_branch_likely(x) \
({ \
bool branch; \
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index ff9f1d394235..7463355a198b 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -40,8 +40,7 @@ struct key_construction {
*/
struct key_preparsed_payload {
char *description; /* Proposed key description (or NULL) */
- void *type_data[2]; /* Private key-type data */
- void *payload[2]; /* Proposed payload */
+ union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
diff --git a/include/linux/key.h b/include/linux/key.h
index e1d4715f3222..66f705243985 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -89,6 +89,11 @@ struct keyring_index_key {
size_t desc_len;
};
+union key_payload {
+ void __rcu *rcu_data0;
+ void *data[4];
+};
+
/*****************************************************************************/
/*
* key reference with possession attribute handling
@@ -186,28 +191,18 @@ struct key {
};
};
- /* type specific data
- * - this is used by the keyring type to index the name
- */
- union {
- struct list_head link;
- unsigned long x[2];
- void *p[2];
- int reject_error;
- } type_data;
-
/* key data
* - this is used to hold the data actually used in cryptography or
* whatever
*/
union {
- union {
- unsigned long value;
- void __rcu *rcudata;
- void *data;
- void *data2[2];
- } payload;
- struct assoc_array keys;
+ union key_payload payload;
+ struct {
+ /* Keyring bits */
+ struct list_head name_link;
+ struct assoc_array keys;
+ };
+ int reject_error;
};
};
@@ -336,12 +331,12 @@ static inline bool key_is_instantiated(const struct key *key)
}
#define rcu_dereference_key(KEY) \
- (rcu_dereference_protected((KEY)->payload.rcudata, \
+ (rcu_dereference_protected((KEY)->payload.rcu_data0, \
rwsem_is_locked(&((struct key *)(KEY))->sem)))
#define rcu_assign_keypointer(KEY, PAYLOAD) \
do { \
- rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \
+ rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
#ifdef CONFIG_SYSCTL
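As a sketch of the renamed RCU slot in use, a key type's ->instantiate() would publish its payload through rcu_assign_keypointer(); struct foo_payload and the kmemdup of the preparsed data are assumptions for the example:

static int foo_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	struct foo_payload *p;

	p = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* stores into key->payload.rcu_data0, readable via rcu_dereference_key() */
	rcu_assign_keypointer(key, p);
	return 0;
}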
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1bef9e21e725..242a6d2b53ff 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
+#include <linux/irqbypass.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -140,6 +141,8 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_APIC_PAGE_RELOAD 25
#define KVM_REQ_SMI 26
#define KVM_REQ_HV_CRASH 27
+#define KVM_REQ_IOAPIC_EOI_EXIT 28
+#define KVM_REQ_HV_RESET 29
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -231,6 +234,9 @@ struct kvm_vcpu {
unsigned long requests;
unsigned long guest_debug;
+ int pre_pcpu;
+ struct list_head blocked_vcpu_list;
+
struct mutex mutex;
struct kvm_run *run;
@@ -329,6 +335,18 @@ struct kvm_kernel_irq_routing_entry {
struct hlist_node link;
};
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+struct kvm_irq_routing_table {
+ int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
+ u32 nr_rt_entries;
+ /*
+ * Array indexed by gsi. Each entry contains list of irq chips
+ * the gsi is connected to.
+ */
+ struct hlist_head map[0];
+};
+#endif
+
#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif
@@ -455,10 +473,14 @@ void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_arch_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
+static inline void kvm_arch_irq_routing_update(struct kvm *kvm)
+{
+}
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
@@ -625,6 +647,8 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -803,10 +827,13 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
bool line_status);
-int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level, bool line_status);
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
+void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
@@ -1002,6 +1029,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#endif
int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
@@ -1144,5 +1172,15 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
-#endif
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
+int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
new file mode 100644
index 000000000000..0c1de05098c8
--- /dev/null
+++ b/include/linux/kvm_irqfd.h
@@ -0,0 +1,71 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * irqfd: Allows an fd to be used to inject an interrupt to the guest
+ * Credit goes to Avi Kivity for the original idea.
+ */
+
+#ifndef __LINUX_KVM_IRQFD_H
+#define __LINUX_KVM_IRQFD_H
+
+#include <linux/kvm_host.h>
+#include <linux/poll.h>
+
+/*
+ * Resampling irqfds are a special variety of irqfds used to emulate
+ * level triggered interrupts. The interrupt is asserted on eventfd
+ * trigger. On acknowledgment through the irq ack notifier, the
+ * interrupt is de-asserted and userspace is notified through the
+ * resamplefd. All resamplers on the same gsi are de-asserted
+ * together, so we don't need to track the state of each individual
+ * user. We can also therefore share the same irq source ID.
+ */
+struct kvm_kernel_irqfd_resampler {
+ struct kvm *kvm;
+ /*
+ * List of resampling struct kvm_kernel_irqfd objects sharing this gsi.
+ * RCU list modified under kvm->irqfds.resampler_lock
+ */
+ struct list_head list;
+ struct kvm_irq_ack_notifier notifier;
+ /*
+ * Entry in list of kvm->irqfd.resampler_list. Use for sharing
+ * resamplers among irqfds on the same gsi.
+ * Accessed and modified under kvm->irqfds.resampler_lock
+ */
+ struct list_head link;
+};
+
+struct kvm_kernel_irqfd {
+ /* Used for MSI fast-path */
+ struct kvm *kvm;
+ wait_queue_t wait;
+ /* Update side is protected by irqfds.lock */
+ struct kvm_kernel_irq_routing_entry irq_entry;
+ seqcount_t irq_entry_sc;
+ /* Used for level IRQ fast-path */
+ int gsi;
+ struct work_struct inject;
+ /* The resampler used by this irqfd (resampler-only) */
+ struct kvm_kernel_irqfd_resampler *resampler;
+ /* Eventfd notified on resample (resampler-only) */
+ struct eventfd_ctx *resamplefd;
+ /* Entry in list of irqfds for a resampler (resampler-only) */
+ struct list_head resampler_link;
+ /* Used for setup/shutdown */
+ struct eventfd_ctx *eventfd;
+ struct list_head list;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct irq_bypass_consumer consumer;
+ struct irq_bypass_producer *producer;
+};
+
+#endif /* __LINUX_KVM_IRQFD_H */
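
A hedged sketch of how an architecture's bypass hook might recover the irqfd from the consumer embedded in the struct above; the routing call is illustrative of intent, not the actual arch code:

#include <linux/kvm_irqfd.h>

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	/* Illustrative: point the host IRQ at the guest GSI for this irqfd. */
	return kvm_arch_update_irqfd_routing(irqfd->kvm, prod->irq,
					     irqfd->gsi, true);
}
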
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c9cfbcdb8d14..83577f8fd15b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -254,6 +254,7 @@ enum {
ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
+ ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c06c70b4404e..cd0e2413c358 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -213,6 +213,9 @@ struct mem_cgroup {
/* OOM-Killer disable */
int oom_kill_disable;
+ /* handle for "memory.events" */
+ struct cgroup_file events_file;
+
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -285,6 +288,7 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg,
unsigned int nr)
{
this_cpu_add(memcg->stat->events[idx], nr);
+ cgroup_file_notify(&memcg->events_file);
}
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
@@ -345,9 +349,7 @@ ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_disabled(void)
{
- if (memory_cgrp_subsys.disabled)
- return true;
- return false;
+ return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
/*
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index cc8ad1e1a307..b24c771cebd5 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -11,6 +11,8 @@
#ifndef __LINUX_MFD_AXP20X_H
#define __LINUX_MFD_AXP20X_H
+#include <linux/regmap.h>
+
enum {
AXP152_ID = 0,
AXP202_ID,
@@ -438,4 +440,26 @@ struct axp288_extcon_pdata {
struct gpio_desc *gpio_mux_cntl;
};
+/* generic helper function for reading 9-16 bit wide regs */
+static inline int axp20x_read_variable_width(struct regmap *regmap,
+ unsigned int reg, unsigned int width)
+{
+ unsigned int reg_val, result;
+ int err;
+
+ err = regmap_read(regmap, reg, &reg_val);
+ if (err)
+ return err;
+
+ result = reg_val << (width - 8);
+
+ err = regmap_read(regmap, reg + 1, &reg_val);
+ if (err)
+ return err;
+
+ result |= reg_val;
+
+ return result;
+}
+
#endif /* __LINUX_MFD_AXP20X_H */
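
For example, a sub-driver could use the new helper to read a 12-bit register pair (high byte at reg, remaining bits at reg + 1); the register constant below is a placeholder, not a real AXP20X_* define:

#define EXAMPLE_ADC_HIGH_REG	0x78	/* placeholder offset */

static int example_read_adc(struct regmap *regmap)
{
	/* Returns the 12-bit value, or a negative regmap error code. */
	return axp20x_read_variable_width(regmap, EXAMPLE_ADC_HIGH_REG, 12);
}
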
diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h
index 386743dd931c..8bc51180800a 100644
--- a/include/linux/mfd/tps6105x.h
+++ b/include/linux/mfd/tps6105x.h
@@ -10,6 +10,7 @@
#define MFD_TPS6105X_H
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/regulator/machine.h>
/*
@@ -82,20 +83,15 @@ struct tps6105x_platform_data {
/**
* struct tps6105x - state holder for the TPS6105x drivers
- * @mutex: mutex to serialize I2C accesses
* @i2c_client: corresponding I2C client
* @regulator: regulator device if used in voltage mode
+ * @regmap: used for i2c communication when accessing registers
*/
struct tps6105x {
struct tps6105x_platform_data *pdata;
- struct mutex lock;
struct i2c_client *client;
struct regulator_dev *regulator;
+ struct regmap *regmap;
};
-extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value);
-extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf);
-extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg,
- u8 bitmask, u8 bitvalues);
-
#endif
diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h
deleted file mode 100644
index a857f719bf40..000000000000
--- a/include/linux/power/bq27x00_battery.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __LINUX_BQ27X00_BATTERY_H__
-#define __LINUX_BQ27X00_BATTERY_H__
-
-/**
- * struct bq27000_plaform_data - Platform data for bq27000 devices
- * @name: Name of the battery. If NULL the driver will fallback to "bq27000".
- * @read: HDQ read callback.
- * This function should provide access to the HDQ bus the battery is
- * connected to.
- * The first parameter is a pointer to the battery device, the second the
- * register to be read. The return value should either be the content of
- * the passed register or an error value.
- */
-struct bq27000_platform_data {
- const char *name;
- int (*read)(struct device *dev, unsigned int);
-};
-
-#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
new file mode 100644
index 000000000000..45f6a7b5b3cb
--- /dev/null
+++ b/include/linux/power/bq27xxx_battery.h
@@ -0,0 +1,31 @@
+#ifndef __LINUX_BQ27X00_BATTERY_H__
+#define __LINUX_BQ27X00_BATTERY_H__
+
+/**
+ * struct bq27xxx_platform_data - Platform data for bq27xxx devices
+ * @name: Name of the battery.
+ * @chip: Chip class number of this device.
+ * @read: HDQ read callback.
+ * This function should provide access to the HDQ bus the battery is
+ * connected to.
+ * The first parameter is a pointer to the battery device, the second the
+ * register to be read. The return value should either be the content of
+ * the passed register or an error value.
+ */
+enum bq27xxx_chip {
+ BQ27000 = 1, /* bq27000, bq27200 */
+ BQ27010, /* bq27010, bq27210 */
+ BQ27500, /* bq27500, bq27510, bq27520 */
+ BQ27530, /* bq27530, bq27531 */
+ BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
+ BQ27545, /* bq27545 */
+ BQ27421, /* bq27421, bq27425, bq27441, bq27621 */
+};
+
+struct bq27xxx_platform_data {
+ const char *name;
+ enum bq27xxx_chip chip;
+ int (*read)(struct device *dev, unsigned int);
+};
+
+#endif
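
A minimal sketch of platform data using the new chip field; example_hdq_bus_read() is a hypothetical bus accessor and the battery name is made up:

static int example_hdq_read(struct device *dev, unsigned int reg)
{
	/* example_hdq_bus_read() is hypothetical: it should return the
	 * register contents read over HDQ, or a negative error code. */
	return example_hdq_bus_read(dev, reg);
}

static struct bq27xxx_platform_data example_battery_pdata = {
	.name	= "main-battery",
	.chip	= BQ27000,
	.read	= example_hdq_read,
};
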
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index eadf28cb2fc9..c4fa907c8f14 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -65,7 +65,7 @@ struct charger_cable {
const char *extcon_name;
const char *name;
- /* The charger-manager use Exton framework*/
+ /* The charger-manager uses the Extcon framework */
struct extcon_specific_cable_nb extcon_dev;
struct work_struct wq;
struct notifier_block nb;
@@ -94,7 +94,7 @@ struct charger_cable {
* the charger will be maintained with disabled state.
* @cables:
* the array of charger cables to enable/disable charger
- * and set current limit according to constratint data of
+ * and set current limit according to constraint data of
* struct charger_cable if only charger cable included
* in the array of charger cables is attached/detached.
* @num_cables: the number of charger cables.
@@ -148,7 +148,7 @@ struct charger_regulator {
* @polling_interval_ms: interval in millisecond at which
* charger manager will monitor battery health
* @battery_present:
- * Specify where information for existance of battery can be obtained
+ * Specify where information for existence of battery can be obtained
* @psy_charger_stat: the names of power-supply for chargers
* @num_charger_regulator: the number of entries in charger_regulators
* @charger_regulators: array of charger regulators
@@ -156,7 +156,7 @@ struct charger_regulator {
* @thermal_zone : the name of thermal zone for battery
* @temp_min : Minimum battery temperature for charging.
* @temp_max : Maximum battery temperature for charging.
- * @temp_diff : Temperature diffential to restart charging.
+ * @temp_diff : Temperature difference to restart charging.
* @measure_battery_temp:
* true: measure battery temperature
* false: measure ambient temperature
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 8e7a25b068b0..831479f8df8f 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -75,20 +75,8 @@ struct pstore_info {
#define PSTORE_FLAGS_FRAGILE 1
-#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
+extern void pstore_unregister(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
-#else
-static inline int
-pstore_register(struct pstore_info *psi)
-{
- return -ENODEV;
-}
-static inline bool
-pstore_cannot_block_path(enum kmsg_dump_reason reason)
-{
- return false;
-}
-#endif
#endif /*_LINUX_PSTORE_H*/
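
With the stubs gone and pstore_unregister() exported, a backend would pair the two calls roughly as follows (callbacks and error handling trimmed):

static struct pstore_info example_psinfo = {
	.owner	= THIS_MODULE,
	.name	= "example",
	/* .open/.read/.write/.erase callbacks omitted for brevity */
};

static int __init example_pstore_init(void)
{
	return pstore_register(&example_psinfo);
}

static void __exit example_pstore_exit(void)
{
	pstore_unregister(&example_psinfo);
}
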
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 92273776bce6..c2f2574ff61c 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -198,6 +198,7 @@ enum pxa_ssp_type {
LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
LPSS_BYT_SSP,
LPSS_SPT_SSP,
+ LPSS_BXT_SSP,
};
struct ssp_device {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 45932228cbf5..9c2903e58adb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -245,6 +245,7 @@ enum regulator_type {
* @linear_min_sel: Minimal selector for starting linear mapping
* @fixed_uV: Fixed voltage of rails.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
* @n_linear_ranges: Number of entries in the @linear_ranges table.
* @volt_table: Voltage mapping table (if table based mapping)
@@ -292,6 +293,7 @@ struct regulator_desc {
unsigned int linear_min_sel;
int fixed_uV;
unsigned int ramp_delay;
+ int min_dropout_uV;
const struct regulator_linear_range *linear_ranges;
int n_linear_ranges;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 055f2ee3b0f0..eeb5066a44fb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -772,18 +772,6 @@ struct signal_struct {
unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
-#ifdef CONFIG_CGROUPS
- /*
- * group_rwsem prevents new tasks from entering the threadgroup and
- * member tasks from exiting,a more specifically, setting of
- * PF_EXITING. fork and exit paths are protected with this rwsem
- * using threadgroup_change_begin/end(). Users which require
- * threadgroup to remain stable should use threadgroup_[un]lock()
- * which also takes care of exec path. Currently, cgroup is the
- * only user.
- */
- struct rw_semaphore group_rwsem;
-#endif
oom_flags_t oom_flags;
short oom_score_adj; /* OOM kill score adjustment */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 6b00f18f5e6b..cce80e6dc7d1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -51,6 +51,8 @@ extern struct bus_type spi_bus_type;
* @bytes_tx: number of bytes sent to device
* @bytes_rx: number of bytes received from device
*
+ * @transfer_bytes_histo:
+ * transfer bytes histogram
*/
struct spi_statistics {
spinlock_t lock; /* lock for the whole structure */
@@ -68,6 +70,8 @@ struct spi_statistics {
unsigned long long bytes_rx;
unsigned long long bytes_tx;
+#define SPI_STATISTICS_HISTO_SIZE 17
+ unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -250,7 +254,7 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}
-extern int spi_register_driver(struct spi_driver *sdrv);
+extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
/**
* spi_unregister_driver - reverse effect of spi_register_driver
@@ -263,6 +267,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define spi_register_driver(driver) \
+ __spi_register_driver(THIS_MODULE, driver)
+
/**
* module_spi_driver() - Helper macro for registering a SPI driver
* @__spi_driver: spi_driver struct
@@ -843,8 +851,10 @@ extern int spi_bus_unlock(struct spi_master *master);
* @len: data buffer size
* Context: can sleep
*
- * This writes the buffer and returns zero or a negative error code.
+ * This function writes the buffer @buf.
* Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_write(struct spi_device *spi, const void *buf, size_t len)
@@ -867,8 +877,10 @@ spi_write(struct spi_device *spi, const void *buf, size_t len)
* @len: data buffer size
* Context: can sleep
*
- * This reads the buffer and returns zero or a negative error code.
+ * This function reads the buffer @buf.
* Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len)
@@ -895,7 +907,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
*
* For more specific semantics see spi_sync().
*
- * It returns zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
@@ -919,9 +931,10 @@ extern int spi_write_then_read(struct spi_device *spi,
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) eight bit number returned by the
- * device, or else a negative error code. Callable only from
- * contexts that can sleep.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) eight bit number returned by the
+ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
@@ -940,12 +953,13 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) sixteen bit number returned by the
- * device, or else a negative error code. Callable only from
- * contexts that can sleep.
- *
* The number is returned in wire-order, which is at least sometimes
* big-endian.
+ *
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the
+ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
@@ -964,13 +978,13 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
* @cmd: command to be written before data is read back
* Context: can sleep
*
- * This returns the (unsigned) sixteen bit number returned by the device in cpu
- * endianness, or else a negative error code. Callable only from contexts that
- * can sleep.
- *
* This function is similar to spi_w8r16, with the exception that it will
* convert the read 16 bit data word from big-endian to native endianness.
*
+ * Callable only from contexts that can sleep.
+ *
+ * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
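
Tying the spi.h changes together: the macro above captures module ownership automatically, and the reworked kernel-doc now reports return values under Return:. A hedged driver skeleton follows; the names and the 0x0f command byte are placeholders:

static int example_spi_probe(struct spi_device *spi)
{
	/* spi_w8r16be() returns the device's 16-bit answer in CPU order,
	 * or a negative error code; 0x0f is a placeholder ID command. */
	ssize_t id = spi_w8r16be(spi, 0x0f);

	return id < 0 ? id : 0;
}

static struct spi_driver example_spi_driver = {
	.driver	= { .name = "example-spi" },
	.probe	= example_spi_probe,
};

/* Registration goes through spi_register_driver(), which now expands to
 * __spi_register_driver(THIS_MODULE, ...). */
module_spi_driver(example_spi_driver);
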
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 85578d4be034..154788ed218c 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -4,7 +4,7 @@
#include <linux/workqueue.h>
struct spi_bitbang {
- spinlock_t lock;
+ struct mutex lock;
u8 busy;
u8 use_dma;
u8 flags; /* extra spi->mode support */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9f65758311a4..ea090eaf468c 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -268,6 +268,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
+int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
@@ -451,6 +454,14 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj,
{
}
+static inline int __compat_only_sysfs_link_entry_to_kobj(
+ struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name)
+{
+ return 0;
+}
+
static inline void sysfs_notify(struct kobject *kobj, const char *dir,
const char *attr)
{
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 8350c538b486..706e63eea080 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -30,6 +30,8 @@
#define TPM_ANY_NUM 0xFFFF
struct tpm_chip;
+struct trusted_key_payload;
+struct trusted_key_options;
struct tpm_class_ops {
const u8 req_complete_mask;
@@ -46,11 +48,22 @@ struct tpm_class_ops {
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+extern int tpm_is_tpm2(u32 chip_num);
extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
+extern int tpm_seal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+extern int tpm_unseal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
#else
+static inline int tpm_is_tpm2(u32 chip_num)
+{
+ return -ENODEV;
+}
static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
return -ENODEV;
}
@@ -63,5 +76,18 @@ static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
return -ENODEV;
}
+
+static inline int tpm_seal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ return -ENODEV;
+}
+static inline int tpm_unseal_trusted(u32 chip_num,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ return -ENODEV;
+}
#endif
#endif
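
A hedged sketch of how a caller (for instance the trusted-keys code) might combine the new entry points; the surrounding option/payload setup is omitted and assumed:

static int example_seal(struct trusted_key_payload *payload,
			struct trusted_key_options *options)
{
	/* tpm_is_tpm2() is assumed to return > 0 for a TPM 2.0 chip,
	 * 0 for TPM 1.x and a negative errno if no chip was found. */
	if (tpm_is_tpm2(TPM_ANY_NUM) <= 0)
		return -ENODEV;

	return tpm_seal_trusted(TPM_ANY_NUM, payload, options);
}
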
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index fa0247ad815f..e14a9370b67e 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -17,6 +17,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/davinci/vpbe_types.h>
#include <media/davinci/vpbe_osd.h>
@@ -64,7 +65,7 @@ struct display_layer_info {
};
struct vpbe_disp_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 05e7ad5d2c8b..0ab59a571fee 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -118,6 +118,71 @@ static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
return ret;
}
+/**
+ * struct lirc_driver - Defines the parameters on a LIRC driver
+ *
+ * @name: this string will be used for logs
+ *
+ * @minor: indicates minor device (/dev/lirc) number for the
+ * registered driver. If the caller fills it with a negative
+ * value, then the first free minor number will be used
+ * (if available).
+ *
+ * @code_length: length of the remote control key code expressed in bits.
+ *
+ * @buffer_size: Number of FIFO buffers with @chunk_size size. If zero,
+ * creates a buffer with BUFLEN size (16 bytes).
+ *
+ * @sample_rate: if zero, the device will wait for an event with a new
+ * code to be parsed. Otherwise, specifies the sample
+ * rate for polling. Value should be between 0
+ * and HZ. If equal to HZ, it would mean one polling per
+ * second.
+ *
+ * @features: lirc compatible hardware features, like LIRC_MODE_RAW,
+ * LIRC_CAN_*, as defined at include/media/lirc.h.
+ *
+ * @chunk_size: Size of each FIFO buffer.
+ *
+ * @data: it may point to any driver data and this pointer will
+ * be passed to all callback functions.
+ *
+ * @min_timeout: Minimum timeout for record. Valid only if
+ * LIRC_CAN_SET_REC_TIMEOUT is defined.
+ *
+ * @max_timeout: Maximum timeout for record. Valid only if
+ * LIRC_CAN_SET_REC_TIMEOUT is defined.
+ *
+ * @add_to_buf: add_to_buf will be called after a specified period of
+ * time or triggered by an external event; this behavior
+ * depends on the value of sample_rate. This function will
+ * be called in user context. This routine should return
+ * 0 if data was added to the buffer and -ENODATA if none
+ * was available. This should add some number of bits
+ * evenly divisible by code_length to the buffer.
+ *
+ * @rbuf: if not NULL, it will be used as a read buffer, you will
+ * have to write to the buffer by other means, like irq's
+ * (see also lirc_serial.c).
+ *
+ * @set_use_inc: set_use_inc will be called after device is opened
+ *
+ * @set_use_dec: set_use_dec will be called after device is closed
+ *
+ * @rdev: Pointer to the struct rc_dev associated with the LIRC
+ * device.
+ *
+ * @fops: file_operations for drivers which don't fit the current
+ * driver model.
+ * Some ioctl's can be directly handled by lirc_dev if the
+ * driver's ioctl function is NULL or if it returns
+ * -ENOIOCTLCMD (see also lirc_serial.c).
+ *
+ * @dev: pointer to the struct device associated with the LIRC
+ * device.
+ *
+ * @owner: the module owning this struct
+ */
struct lirc_driver {
char name[40];
int minor;
@@ -131,65 +196,16 @@ struct lirc_driver {
void *data;
int min_timeout;
int max_timeout;
- int (*add_to_buf) (void *data, struct lirc_buffer *buf);
+ int (*add_to_buf)(void *data, struct lirc_buffer *buf);
struct lirc_buffer *rbuf;
- int (*set_use_inc) (void *data);
- void (*set_use_dec) (void *data);
+ int (*set_use_inc)(void *data);
+ void (*set_use_dec)(void *data);
struct rc_dev *rdev;
const struct file_operations *fops;
struct device *dev;
struct module *owner;
};
-/* name:
- * this string will be used for logs
- *
- * minor:
- * indicates minor device (/dev/lirc) number for registered driver
- * if caller fills it with negative value, then the first free minor
- * number will be used (if available)
- *
- * code_length:
- * length of the remote control key code expressed in bits
- *
- * sample_rate:
- *
- * data:
- * it may point to any driver data and this pointer will be passed to
- * all callback functions
- *
- * add_to_buf:
- * add_to_buf will be called after specified period of the time or
- * triggered by the external event, this behavior depends on value of
- * the sample_rate this function will be called in user context. This
- * routine should return 0 if data was added to the buffer and
- * -ENODATA if none was available. This should add some number of bits
- * evenly divisible by code_length to the buffer
- *
- * rbuf:
- * if not NULL, it will be used as a read buffer, you will have to
- * write to the buffer by other means, like irq's (see also
- * lirc_serial.c).
- *
- * set_use_inc:
- * set_use_inc will be called after device is opened
- *
- * set_use_dec:
- * set_use_dec will be called after device is closed
- *
- * fops:
- * file_operations for drivers which don't fit the current driver model.
- *
- * Some ioctl's can be directly handled by lirc_dev if the driver's
- * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also
- * lirc_serial.c).
- *
- * owner:
- * the module owning this struct
- *
- */
-
-
/* following functions can be called ONLY from user context
*
* returns negative value on error or minor number
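
For reference, a rough (and deliberately incomplete) initialization of struct lirc_driver using the fields documented above; every value is a placeholder:

static struct lirc_driver example_lirc_driver = {
	.name		= "example_lirc",
	.minor		= -1,		/* let lirc_dev pick a free minor */
	.code_length	= 32,		/* bits per scancode, placeholder */
	.buffer_size	= 16,		/* FIFO buffers of chunk_size each */
	.chunk_size	= sizeof(int),
	.sample_rate	= 0,		/* event driven, no polling */
	.owner		= THIS_MODULE,
};
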
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 0c003d817493..197f93799753 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -116,6 +116,13 @@ static inline u32 media_entity_subtype(struct media_entity *entity)
#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16
#define MEDIA_ENTITY_ENUM_MAX_ID 64
+/*
+ * The number of pads can't be bigger than the number of entities,
+ * as the worst-case scenario is to have one entity linked up to
+ * MEDIA_ENTITY_ENUM_MAX_ID - 1 entities.
+ */
+#define MEDIA_ENTITY_MAX_PADS (MEDIA_ENTITY_ENUM_MAX_ID - 1)
+
struct media_entity_graph {
struct {
struct media_entity *entity;
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 2f6261f3e570..97aa13314bfd 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -18,7 +18,7 @@
#include <linux/pm.h>
#include <linux/videodev2.h>
#include <media/videobuf-core.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index ab03c5344209..094e112cc325 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -5,6 +5,15 @@
#ifndef __TUNER_TYPES_H__
#define __TUNER_TYPES_H__
+/**
+ * enum param_type - type of the tuner parameters
+ *
+ * @TUNER_PARAM_TYPE_RADIO: Tuner params are for FM and/or AM radio
+ * @TUNER_PARAM_TYPE_PAL: Tuner params are for PAL color TV standard
+ * @TUNER_PARAM_TYPE_SECAM: Tuner params are for SECAM color TV standard
+ * @TUNER_PARAM_TYPE_NTSC: Tuner params are for NTSC color TV standard
+ * @TUNER_PARAM_TYPE_DIGITAL: Tuner params are for digital TV
+ */
enum param_type {
TUNER_PARAM_TYPE_RADIO,
TUNER_PARAM_TYPE_PAL,
@@ -13,97 +22,142 @@ enum param_type {
TUNER_PARAM_TYPE_DIGITAL,
};
+/**
+ * struct tuner_range - define the frequencies supported by the tuner
+ *
+ * @limit: Max frequency supported by that range, in 62.5 kHz
+ * (TV) or 62.5 Hz (Radio), as defined by
+ * V4L2_TUNER_CAP_LOW.
+ * @config: Value of the band switch byte (BB) to setup this mode.
+ * @cb: Value of the CB byte to setup this mode.
+ *
+ * Please notice that digital tuners like xc3028/xc4000/xc5000 don't use
+ * those ranges, as they're defined inside the driver. This is used by
+ * analog tuners that are compatible with the "Philips way" to setup the
+ * tuners. On those devices, the tuner set is done via 4 bytes:
+ * divider byte1 (DB1), divider byte 2 (DB2), Control byte (CB) and
+ * band switch byte (BB).
+ * Some tuners also have an additional optional Auxiliary byte (AB).
+ */
struct tuner_range {
unsigned short limit;
unsigned char config;
unsigned char cb;
};
+/**
+ * struct tuner_params - Parameters to be used to setup the tuner. Those
+ * are used by drivers/media/tuners/tuner-types.c in
+ * order to specify the tuner properties. Most of
+ * the parameters are for tuners based on tda9887 IF-PLL
+ * multi-standard analog TV/Radio demodulator, which is
+ * very common on legacy analog tuners.
+ *
+ * @type: Type of the tuner parameters, as defined at
+ * enum param_type. If the tuner supports multiple
+ * standards, an array should be used, with one
+ * row per different standard.
+ * @cb_first_if_lower_freq: Many Philips-based tuners have a comment in
+ * their datasheet like
+ * "For channel selection involving band
+ * switching, and to ensure smooth tuning to the
+ * desired channel without causing unnecessary
+ * charge pump action, it is recommended to
+ * consider the difference between wanted channel
+ * frequency and the current channel frequency.
+ * Unnecessary charge pump action will result
+ * in very low tuning voltage which may drive the
+ * oscillator to extreme conditions".
+ * Set cb_first_if_lower_freq to 1, if this check
+ * is required for this tuner. I tested this for
+ * PAL by first setting the TV frequency to
+ * 203 MHz and then switching to 96.6 MHz FM
+ * radio. The result was static unless the
+ * control byte was sent first.
+ * @has_tda9887: Set to 1 if this tuner uses a tda9887
+ * @port1_fm_high_sensitivity: Many Philips tuners use tda9887 PORT1 to select
+ * the FM radio sensitivity. If this setting is 1,
+ * then set PORT1 to 1 to get proper FM reception.
+ * @port2_fm_high_sensitivity: Some Philips tuners use tda9887 PORT2 to select
+ * the FM radio sensitivity. If this setting is 1,
+ * then set PORT2 to 1 to get proper FM reception.
+ * @fm_gain_normal: Some Philips tuners use tda9887 cGainNormal to
+ * select the FM radio sensitivity. If this
+ * setting is 1, the register will use cGainNormal
+ * instead of cGainLow.
+ * @intercarrier_mode: Most tuners with a tda9887 use QSS mode.
+ * Some (cheaper) tuners use Intercarrier mode.
+ * If this setting is 1, then the tuner needs to
+ * be set to intercarrier mode.
+ * @port1_active: This setting sets the default value for PORT1.
+ * 0 means inactive, 1 means active. Note: the
+ * actual bit value written to the tda9887 is
+ * inverted. So a 0 here means a 1 in the B6 bit.
+ * @port2_active: This setting sets the default value for PORT2.
+ * 0 means inactive, 1 means active. Note: the
+ * actual bit value written to the tda9887 is
+ * inverted. So a 0 here means a 1 in the B7 bit.
+ * @port1_invert_for_secam_lc: Sometimes PORT1 is inverted when the SECAM-L'
+ * standard is selected. Set this bit to 1 if this
+ * is needed.
+ * @port2_invert_for_secam_lc: Sometimes PORT2 is inverted when the SECAM-L'
+ * standard is selected. Set this bit to 1 if this
+ * is needed.
+ * @port1_set_for_fm_mono: Some cards require PORT1 to be 1 for mono Radio
+ * FM and 0 for stereo.
+ * @default_pll_gating_18: Select 18% (or according to datasheet 0%)
+ * L standard PLL gating, vs the driver default
+ * of 36%.
+ * @radio_if: IF to use in radio mode. Tuners with a
+ * separate radio IF filter seem to use 10.7,
+ * while those without use 33.3 for PAL/SECAM
+ * tuners and 41.3 for NTSC tuners.
+ * 0 = 10.7, 1 = 33.3, 2 = 41.3
+ * @default_top_low: Default tda9887 TOP value in dB for the low
+ * band. Default is 0. Range: -16:+15
+ * @default_top_mid: Default tda9887 TOP value in dB for the mid
+ * band. Default is 0. Range: -16:+15
+ * @default_top_high: Default tda9887 TOP value in dB for the high
+ * band. Default is 0. Range: -16:+15
+ * @default_top_secam_low: Default tda9887 TOP value in dB for SECAM-L/L'
+ * for the low band. Default is 0. Several tuners
+ * require a different TOP value for the
+ * SECAM-L/L' standards. Range: -16:+15
+ * @default_top_secam_mid: Default tda9887 TOP value in dB for SECAM-L/L'
+ * for the mid band. Default is 0. Several tuners
+ * require a different TOP value for the
+ * SECAM-L/L' standards. Range: -16:+15
+ * @default_top_secam_high: Default tda9887 TOP value in dB for SECAM-L/L'
+ * for the high band. Default is 0. Several tuners
+ * require a different TOP value for the
+ * SECAM-L/L' standards. Range: -16:+15
+ * @iffreq: Intermediate frequency (IF) used by the tuner
+ * on digital mode.
+ * @count: Size of the ranges array.
+ * @ranges: Array with the frequency ranges supported by
+ * the tuner.
+ */
struct tuner_params {
enum param_type type;
- /* Many Philips based tuners have a comment like this in their
- * datasheet:
- *
- * For channel selection involving band switching, and to ensure
- * smooth tuning to the desired channel without causing
- * unnecessary charge pump action, it is recommended to consider
- * the difference between wanted channel frequency and the
- * current channel frequency. Unnecessary charge pump action
- * will result in very low tuning voltage which may drive the
- * oscillator to extreme conditions.
- *
- * Set cb_first_if_lower_freq to 1, if this check is
- * required for this tuner.
- *
- * I tested this for PAL by first setting the TV frequency to
- * 203 MHz and then switching to 96.6 MHz FM radio. The result was
- * static unless the control byte was sent first.
- */
unsigned int cb_first_if_lower_freq:1;
- /* Set to 1 if this tuner uses a tda9887 */
unsigned int has_tda9887:1;
- /* Many Philips tuners use tda9887 PORT1 to select the FM radio
- sensitivity. If this setting is 1, then set PORT1 to 1 to
- get proper FM reception. */
unsigned int port1_fm_high_sensitivity:1;
- /* Some Philips tuners use tda9887 PORT2 to select the FM radio
- sensitivity. If this setting is 1, then set PORT2 to 1 to
- get proper FM reception. */
unsigned int port2_fm_high_sensitivity:1;
- /* Some Philips tuners use tda9887 cGainNormal to select the FM radio
- sensitivity. If this setting is 1, e register will use cGainNormal
- instead of cGainLow. */
unsigned int fm_gain_normal:1;
- /* Most tuners with a tda9887 use QSS mode. Some (cheaper) tuners
- use Intercarrier mode. If this setting is 1, then the tuner
- needs to be set to intercarrier mode. */
unsigned int intercarrier_mode:1;
- /* This setting sets the default value for PORT1.
- 0 means inactive, 1 means active. Note: the actual bit
- value written to the tda9887 is inverted. So a 0 here
- means a 1 in the B6 bit. */
unsigned int port1_active:1;
- /* This setting sets the default value for PORT2.
- 0 means inactive, 1 means active. Note: the actual bit
- value written to the tda9887 is inverted. So a 0 here
- means a 1 in the B7 bit. */
unsigned int port2_active:1;
- /* Sometimes PORT1 is inverted when the SECAM-L' standard is selected.
- Set this bit to 1 if this is needed. */
unsigned int port1_invert_for_secam_lc:1;
- /* Sometimes PORT2 is inverted when the SECAM-L' standard is selected.
- Set this bit to 1 if this is needed. */
unsigned int port2_invert_for_secam_lc:1;
- /* Some cards require PORT1 to be 1 for mono Radio FM and 0 for stereo. */
unsigned int port1_set_for_fm_mono:1;
- /* Select 18% (or according to datasheet 0%) L standard PLL gating,
- vs the driver default of 36%. */
unsigned int default_pll_gating_18:1;
- /* IF to use in radio mode. Tuners with a separate radio IF filter
- seem to use 10.7, while those without use 33.3 for PAL/SECAM tuners
- and 41.3 for NTSC tuners. 0 = 10.7, 1 = 33.3, 2 = 41.3 */
unsigned int radio_if:2;
- /* Default tda9887 TOP value in dB for the low band. Default is 0.
- Range: -16:+15 */
signed int default_top_low:5;
- /* Default tda9887 TOP value in dB for the mid band. Default is 0.
- Range: -16:+15 */
signed int default_top_mid:5;
- /* Default tda9887 TOP value in dB for the high band. Default is 0.
- Range: -16:+15 */
signed int default_top_high:5;
- /* Default tda9887 TOP value in dB for SECAM-L/L' for the low band.
- Default is 0. Several tuners require a different TOP value for
- the SECAM-L/L' standards. Range: -16:+15 */
signed int default_top_secam_low:5;
- /* Default tda9887 TOP value in dB for SECAM-L/L' for the mid band.
- Default is 0. Several tuners require a different TOP value for
- the SECAM-L/L' standards. Range: -16:+15 */
signed int default_top_secam_mid:5;
- /* Default tda9887 TOP value in dB for SECAM-L/L' for the high band.
- Default is 0. Several tuners require a different TOP value for
- the SECAM-L/L' standards. Range: -16:+15 */
signed int default_top_secam_high:5;
u16 iffreq;
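
As a loose illustration of how these documented fields combine in tuner tables (the real ones live in drivers/media/tuners/tuner-types.c); the band-switch and CB values below are invented:

static struct tuner_range example_pal_ranges[] = {
	{ 16 * 140.25 /*MHz*/, 0x8e, 0x02 },	/* low band, invented values  */
	{ 16 * 463.25 /*MHz*/, 0x8e, 0x04 },	/* mid band, invented values  */
	{ 16 * 999.99        , 0x8e, 0x01 },	/* high band, invented values */
};

static struct tuner_params example_tuner_params[] = {
	{
		.type		= TUNER_PARAM_TYPE_PAL,
		.has_tda9887	= 1,
		.ranges		= example_pal_ranges,
		.count		= ARRAY_SIZE(example_pal_ranges),
	},
};
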
diff --git a/include/media/tuner.h b/include/media/tuner.h
index b46ebb48fe74..486b6a54363b 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -1,23 +1,19 @@
/*
- tuner.h - definition for different tuners
-
- Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de)
- minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * tuner.h - definition for different tuners
+ *
+ * Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de)
+ * minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _TUNER_H
#define _TUNER_H
@@ -83,8 +79,11 @@
#define TUNER_PHILIPS_FM1236_MK3 43
#define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */
-/* Microtune merged with Temic 12/31/1999 partially financed by Alps - these may be similar to Temic */
-#define TUNER_MICROTUNE_4049FM5 45
+ /*
+ * Microtune merged with Temic 12/31/1999 partially financed by Alps.
+ * these may be similar to Temic
+ */
+#define TUNER_MICROTUNE_4049FM5 45
#define TUNER_PANASONIC_VP27 46
#define TUNER_LG_NTSC_TAPE 47
@@ -115,11 +114,11 @@
#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */
#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */
-#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */
+#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */
#define TUNER_XC2028 71
#define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */
-#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */
+#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */
#define TUNER_TDA9887 74 /* This tuner should be used only internally */
#define TUNER_TEA5761 75 /* Only FM Radio Tuner */
#define TUNER_XC5000 76 /* Xceive Silicon Tuner */
@@ -143,57 +142,92 @@
#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */
/* tv card specific */
-#define TDA9887_PRESENT (1<<0)
-#define TDA9887_PORT1_INACTIVE (1<<1)
-#define TDA9887_PORT2_INACTIVE (1<<2)
-#define TDA9887_QSS (1<<3)
-#define TDA9887_INTERCARRIER (1<<4)
-#define TDA9887_PORT1_ACTIVE (1<<5)
-#define TDA9887_PORT2_ACTIVE (1<<6)
-#define TDA9887_INTERCARRIER_NTSC (1<<7)
+#define TDA9887_PRESENT (1<<0)
+#define TDA9887_PORT1_INACTIVE (1<<1)
+#define TDA9887_PORT2_INACTIVE (1<<2)
+#define TDA9887_QSS (1<<3)
+#define TDA9887_INTERCARRIER (1<<4)
+#define TDA9887_PORT1_ACTIVE (1<<5)
+#define TDA9887_PORT2_ACTIVE (1<<6)
+#define TDA9887_INTERCARRIER_NTSC (1<<7)
/* Tuner takeover point adjustment, in dB, -16 <= top <= 15 */
-#define TDA9887_TOP_MASK (0x3f << 8)
-#define TDA9887_TOP_SET (1 << 13)
-#define TDA9887_TOP(top) (TDA9887_TOP_SET | (((16 + (top)) & 0x1f) << 8))
+#define TDA9887_TOP_MASK (0x3f << 8)
+#define TDA9887_TOP_SET (1 << 13)
+#define TDA9887_TOP(top) (TDA9887_TOP_SET | \
+ (((16 + (top)) & 0x1f) << 8))
/* config options */
-#define TDA9887_DEEMPHASIS_MASK (3<<16)
-#define TDA9887_DEEMPHASIS_NONE (1<<16)
-#define TDA9887_DEEMPHASIS_50 (2<<16)
-#define TDA9887_DEEMPHASIS_75 (3<<16)
-#define TDA9887_AUTOMUTE (1<<18)
+#define TDA9887_DEEMPHASIS_MASK (3<<16)
+#define TDA9887_DEEMPHASIS_NONE (1<<16)
+#define TDA9887_DEEMPHASIS_50 (2<<16)
+#define TDA9887_DEEMPHASIS_75 (3<<16)
+#define TDA9887_AUTOMUTE (1<<18)
#define TDA9887_GATING_18 (1<<19)
#define TDA9887_GAIN_NORMAL (1<<20)
#define TDA9887_RIF_41_3 (1<<21) /* radio IF1 41.3 vs 33.3 */
+/**
+ * enum tuner_mode - Mode of the tuner
+ *
+ * @T_RADIO: Tuner core will work in radio mode
+ * @T_ANALOG_TV: Tuner core will work in analog TV mode
+ *
+ * Older boards only had a single tuner device, but some devices have a
+ * separate tuner for radio. In any case, the tuner-core needs to know if
+ * the tuner chip(s) will be used in radio mode or analog TV mode, as, on
+ * radio mode, frequencies are specified on a different range than on TV
+ * mode. This enum is used by the tuner core in order to work with the
+ * proper tuner range and eventually use a different tuner chip while in
+ * radio mode.
+ */
enum tuner_mode {
T_RADIO = 1 << V4L2_TUNER_RADIO,
T_ANALOG_TV = 1 << V4L2_TUNER_ANALOG_TV,
- /* Don't need to map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */
+ /* Don't map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */
};
-/* Older boards only had a single tuner device. Nowadays multiple tuner
- devices may be present on a single board. Using TUNER_SET_TYPE_ADDR
- to pass the tuner_setup structure it is possible to setup each tuner
- device in turn.
-
- Since multiple devices may be present it is no longer sufficient to
- send a command to a single i2c device. Instead you should broadcast
- the command to all i2c devices.
-
- By setting the mode_mask correctly you can select which commands are
- accepted by a specific tuner device. For example, set mode_mask to
- T_RADIO if the device is a radio-only tuner. That specific tuner will
- only accept commands when the tuner is in radio mode and ignore them
- when the tuner is set to TV mode.
+/**
+ * struct tuner_setup - setup the tuner chipsets
+ *
+ * @addr: I2C address used to control the tuner device/chipset
+ * @type: Type of the tuner, as defined at the TUNER_* macros.
+ * Each different tuner model should have an unique
+ * identifier.
+ * @mode_mask: Mask with the allowed tuner modes: V4L2_TUNER_RADIO,
+ * V4L2_TUNER_ANALOG_TV and/or V4L2_TUNER_DIGITAL_TV,
+ * describing if the tuner should be used to support
+ * Radio, analog TV and/or digital TV.
+ * @config: Used to send tuner-specific configuration for complex
+ * tuners that require extra parameters to be set.
+ * Only a very few tuners require it and its usage on
+ * newer tuners should be avoided.
+ * @tuner_callback: Some tuners require calling back into the bridge driver,
+ * in order to do some tasks like raising a GPIO at the
+ * bridge chipset, for things like resetting
+ * the device.
+ *
+ * Older boards only had a single tuner device. Nowadays multiple tuner
+ * devices may be present on a single board. Using TUNER_SET_TYPE_ADDR
+ * to pass the tuner_setup structure it is possible to setup each tuner
+ * device in turn.
+ *
+ * Since multiple devices may be present it is no longer sufficient to
+ * send a command to a single i2c device. Instead you should broadcast
+ * the command to all i2c devices.
+ *
+ * By setting the mode_mask correctly you can select which commands are
+ * accepted by a specific tuner device. For example, set mode_mask to
+ * T_RADIO if the device is a radio-only tuner. That specific tuner will
+ * only accept commands when the tuner is in radio mode and ignore them
+ * when the tuner is set to TV mode.
*/
struct tuner_setup {
- unsigned short addr; /* I2C address */
- unsigned int type; /* Tuner type */
- unsigned int mode_mask; /* Allowed tuner modes */
- void *config; /* configuraion for more complex tuners */
- int (*tuner_callback) (void *dev, int component, int cmd, int arg);
+ unsigned short addr;
+ unsigned int type;
+ unsigned int mode_mask;
+ void *config;
+ int (*tuner_callback)(void *dev, int component, int cmd, int arg);
};
#endif /* __KERNEL__ */
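
A hedged sketch of the broadcast setup described in the kernel-doc above; the tuner type, mode mask and omitted callback are placeholders:

static void example_attach_tuner(struct v4l2_device *v4l2_dev)
{
	struct tuner_setup tun_setup = {
		.addr		= ADDR_UNSET,	/* probe the usual addresses */
		.type		= TUNER_PHILIPS_FM1236_MK3,
		.mode_mask	= T_ANALOG_TV | T_RADIO,
	};

	/* Broadcast to all tuner sub-devices; each filters on mode_mask. */
	v4l2_device_call_all(v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
}
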
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index f7119ee3977b..8be898739e0c 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -1,28 +1,63 @@
+
/*
+ * tveeprom - Contains structures and functions to work with Hauppauge
+ * eeproms.
*/
+#include <linux/if_ether.h>
+
+/**
+ * enum tveeprom_audio_processor - Specifies the type of audio processor
+ * used on a Hauppauge device.
+ *
+ * @TVEEPROM_AUDPROC_NONE: No audio processor present
+ * @TVEEPROM_AUDPROC_INTERNAL: The audio processor is internal to the
+ * video processor
+ * @TVEEPROM_AUDPROC_MSP: The audio processor is a MSPXXXX device
+ * @TVEEPROM_AUDPROC_OTHER: The audio processor is another device
+ */
enum tveeprom_audio_processor {
- /* No audio processor present */
TVEEPROM_AUDPROC_NONE,
- /* The audio processor is internal to the video processor */
TVEEPROM_AUDPROC_INTERNAL,
- /* The audio processor is a MSPXXXX device */
TVEEPROM_AUDPROC_MSP,
- /* The audio processor is another device */
TVEEPROM_AUDPROC_OTHER,
};
-#include <linux/if_ether.h>
-
+/**
+ * struct tveeprom - Contains the fields parsed from Hauppauge eeproms
+ *
+ * @has_radio: 1 if the device has radio; 0 otherwise.
+ * @has_ir: If has_ir == 0, then it is unknown what the IR
+ * capabilities are. Otherwise:
+ * bit 0) 1 (= IR capabilities are known);
+ * bit 1) IR receiver present;
+ * bit 2) IR transmitter (blaster) present.
+ * @has_MAC_address: 0: no MAC, 1: MAC present, 2: unknown.
+ * @tuner_type: type of the tuner (TUNER_*, as defined at
+ * include/media/tuner.h).
+ * @tuner_formats: Supported analog TV standards (V4L2_STD_*).
+ * @tuner_hauppauge_model: Hauppauge's code for the device model number.
+ * @tuner2_type: type of the second tuner (TUNER_*, as defined
+ * at include/media/tuner.h).
+ * @tuner2_formats: Tuner 2 supported analog TV standards
+ * (V4L2_STD_*).
+ * @tuner2_hauppauge_model: tuner 2 Hauppauge's code for the device model
+ * number.
+ * @audio_processor: analog audio decoder, as defined by enum
+ * tveeprom_audio_processor.
+ * @decoder_processor: Hauppauge's code for the decoder chipset.
+ * Unused by the drivers, as they probe the
+ * decoder based on the PCI or USB ID.
+ * @model: Hauppauge's model number
+ * @revision: Card revision number
+ * @serial_number: Card's serial number
+ * @rev_str: Card revision converted to number
+ * @MAC_address: MAC address for the network interface
+ */
struct tveeprom {
u32 has_radio;
- /* If has_ir == 0, then it is unknown what the IR capabilities are,
- otherwise:
- bit 0: 1 (= IR capabilities are known)
- bit 1: IR receiver present
- bit 2: IR transmitter (blaster) present */
u32 has_ir;
- u32 has_MAC_address; /* 0: no MAC, 1: MAC present, 2: unknown */
+ u32 has_MAC_address;
u32 tuner_type;
u32 tuner_formats;
@@ -32,9 +67,6 @@ struct tveeprom {
u32 tuner2_formats;
u32 tuner2_hauppauge_model;
- u32 digitizer;
- u32 digitizer_formats;
-
u32 audio_processor;
u32 decoder_processor;
@@ -45,7 +77,28 @@ struct tveeprom {
u8 MAC_address[ETH_ALEN];
};
+/**
+ * tveeprom_hauppauge_analog - Fill struct tveeprom using the contents
+ * of the eeprom previously stored in the
+ * @eeprom_data field.
+ *
+ * @c: I2C client struct
+ * @tvee: Struct to where the eeprom parsed data will be filled;
+ * @eeprom_data: Array with the contents of the eeprom. It should
+ * contain 256 bytes filled with the contents of the
+ * eeprom read from the Hauppauge device.
+ */
void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
unsigned char *eeprom_data);
+/**
+ * tveeprom_read - Reads the contents of the eeprom found at the Hauppauge
+ * devices.
+ *
+ * @c: I2C client struct
+ * @eedata: Array where the eeprom content will be stored.
+ * @len: Size of @eedata array. If the eeprom content will later
+ * be parsed by tveeprom_hauppauge_analog(), len should be
+ * at least 256.
+ */
int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len);
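
The two helpers above are typically used back to back; a hedged usage sketch:

static void example_parse_eeprom(struct i2c_client *c)
{
	struct tveeprom tvee;
	unsigned char eedata[256];	/* at least 256 bytes, see above */

	if (tveeprom_read(c, eedata, sizeof(eedata)) < 0)
		return;

	tveeprom_hauppauge_analog(c, &tvee, eedata);
	/* tvee.tuner_type, tvee.has_radio, etc. are now populated. */
}
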
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index b6130b50a0f1..a209526b6014 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -23,7 +23,7 @@
#include <linux/videodev2.h>
-/**
+/*
* v4l2_dv_timings_presets: list of all dv_timings presets.
*/
extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
@@ -127,16 +127,16 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
/**
* v4l2_detect_cvt - detect if the given timings follow the CVT standard
*
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @active_width - active width of image (does not include blanking). This
+ * @frame_height: the total height of the frame (including blanking) in lines.
+ * @hfreq: the horizontal frequency in Hz.
+ * @vsync: the height of the vertical sync in lines.
+ * @active_width: active width of image (does not include blanking). This
* information is needed only in case of version 2 of reduced blanking.
* In other cases, this parameter does not have any effect on timings.
- * @polarities - the horizontal and vertical polarities (same as struct
+ * @polarities: the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
- * @interlaced - if this flag is true, it indicates interlaced format
- * @fmt - the resulting timings.
+ * @interlaced: if this flag is true, it indicates interlaced format
+ * @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid CVT format. If so, then it will return true, and fmt will be filled
@@ -149,18 +149,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
/**
* v4l2_detect_gtf - detect if the given timings follow the GTF standard
*
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
+ * @frame_height: the total height of the frame (including blanking) in lines.
+ * @hfreq: the horizontal frequency in Hz.
+ * @vsync: the height of the vertical sync in lines.
+ * @polarities: the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
- * @interlaced - if this flag is true, it indicates interlaced format
- * @aspect - preferred aspect ratio. GTF has no method of determining the
+ * @interlaced: if this flag is true, it indicates interlaced format
+ * @aspect: preferred aspect ratio. GTF has no method of determining the
* aspect ratio in order to derive the image width from the
* image height, so it has to be passed explicitly. Usually
* the native screen aspect ratio is used for this. If it
* is not filled in correctly, then 16:9 will be assumed.
- * @fmt - the resulting timings.
+ * @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid GTF format. If so, then it will return true, and fmt will be filled
@@ -174,8 +174,8 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
* v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
* 0x15 and 0x16 from the EDID.
*
- * @hor_landscape - byte 0x15 from the EDID.
- * @vert_portrait - byte 0x16 from the EDID.
+ * @hor_landscape: byte 0x15 from the EDID.
+ * @vert_portrait: byte 0x16 from the EDID.
*
* Determines the aspect ratio from the EDID.
* See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 8fbbd76d78e8..017ffb2220c7 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -36,6 +36,8 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_sdr_cap) (struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_sdr_out) (struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
int (*vidioc_g_fmt_vid_cap) (struct file *file, void *fh,
@@ -60,6 +62,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_sdr_cap) (struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_sdr_out) (struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
int (*vidioc_s_fmt_vid_cap) (struct file *file, void *fh,
@@ -84,6 +88,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_sdr_cap) (struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_sdr_out) (struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
int (*vidioc_try_fmt_vid_cap) (struct file *file, void *fh,
@@ -108,6 +114,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_sdr_cap) (struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_sdr_out) (struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
int (*vidioc_reqbufs) (struct file *file, void *fh, struct v4l2_requestbuffers *b);
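The new *_sdr_out handlers complete the SDR output (transmitter) side and mirror the existing *_sdr_cap callbacks. A minimal sketch of how a driver might wire them up; the my_* handlers are hypothetical and follow the usual (struct file *, void *fh, ...) signatures shown above.

static const struct v4l2_ioctl_ops my_sdr_tx_ioctl_ops = {
	.vidioc_querycap	 = my_querycap,
	/* format negotiation for a V4L2_BUF_TYPE_SDR_OUTPUT queue */
	.vidioc_enum_fmt_sdr_out = my_enum_fmt_sdr_out,
	.vidioc_g_fmt_sdr_out	 = my_g_fmt_sdr_out,
	.vidioc_s_fmt_sdr_out	 = my_s_fmt_sdr_out,
	.vidioc_try_fmt_sdr_out	 = my_try_fmt_sdr_out,
};
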
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 8849aaba6aa5..5a9597dd1ee0 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -17,7 +17,7 @@
#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
/**
* struct v4l2_m2m_ops - mem-to-mem device driver callbacks
@@ -90,7 +90,7 @@ struct v4l2_m2m_ctx {
};
struct v4l2_m2m_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
@@ -105,9 +105,9 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
static inline void
-v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state)
+v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
- vb2_buffer_done(buf, state);
+ vb2_buffer_done(&buf->vb2_buf, state);
}
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -160,7 +160,8 @@ static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf);
/**
* v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
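Because v4l2_m2m_buf_queue() now takes a struct vb2_v4l2_buffer, a mem2mem driver's vb2 .buf_queue callback has to convert the core buffer first. A minimal sketch, assuming the common pattern of a driver context with an embedded struct v4l2_fh (struct my_ctx is hypothetical):

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* hand the V4L2 wrapper, not the bare vb2_buffer, to the m2m core */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
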
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 589b56c68400..647ebfe5174f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -1,5 +1,5 @@
/*
- * videobuf2-core.h - V4L2 driver helper framework
+ * videobuf2-core.h - Video Buffer 2 Core Framework
*
* Copyright (C) 2010 Samsung Electronics
*
@@ -15,9 +15,18 @@
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-#include <linux/videodev2.h>
#include <linux/dma-buf.h>
+#define VB2_MAX_FRAME (32)
+#define VB2_MAX_PLANES (8)
+
+enum vb2_memory {
+ VB2_MEMORY_UNKNOWN = 0,
+ VB2_MEMORY_MMAP = 1,
+ VB2_MEMORY_USERPTR = 2,
+ VB2_MEMORY_DMABUF = 4,
+};
+
struct vb2_alloc_ctx;
struct vb2_fileio_data;
struct vb2_threadio_data;
@@ -36,6 +45,8 @@ struct vb2_threadio_data;
* no other users of this buffer are present); the buf_priv
* argument is the allocator private per-buffer structure
* previously returned from the alloc callback.
+ * @get_dmabuf: acquire userspace memory for a hardware operation; used for
+ * DMABUF memory types.
* @get_userptr: acquire userspace memory for a hardware operation; used for
* USERPTR memory types; vaddr is the address passed to the
* videobuf layer when queuing a video buffer of USERPTR type;
@@ -111,10 +122,40 @@ struct vb2_mem_ops {
int (*mmap)(void *buf_priv, struct vm_area_struct *vma);
};
+/**
+ * struct vb2_plane - plane information
+ * @mem_priv: private data associated with this plane
+ * @dbuf: dma_buf - shared buffer object
+ * @dbuf_mapped: flag to show whether dbuf is mapped or not
+ * @bytesused: number of bytes occupied by data in the plane (payload)
+ * @length: size of this plane (NOT the payload) in bytes
+ * @offset: when memory in the associated struct vb2_buffer is
+ * VB2_MEMORY_MMAP, equals the offset from the start of
+ * the device memory for this plane (or is a "cookie" that
+ * should be passed to mmap() called on the video node)
+ * @userptr: when memory is VB2_MEMORY_USERPTR, a userspace pointer
+ * pointing to this plane
+ * @fd: when memory is VB2_MEMORY_DMABUF, a userspace file
+ * descriptor associated with this plane
+ * @m: Union with memtype-specific data (@offset, @userptr or
+ * @fd).
+ * @data_offset: offset in the plane to the start of data; usually 0,
+ * unless there is a header in front of the data
+ * Should contain enough information to cover all the fields
+ * of struct v4l2_plane in videodev2.h
+ */
struct vb2_plane {
void *mem_priv;
struct dma_buf *dbuf;
unsigned int dbuf_mapped;
+ unsigned int bytesused;
+ unsigned int length;
+ union {
+ unsigned int offset;
+ unsigned long userptr;
+ int fd;
+ } m;
+ unsigned int data_offset;
};
/**
@@ -163,43 +204,34 @@ struct vb2_queue;
/**
* struct vb2_buffer - represents a video buffer
- * @v4l2_buf: struct v4l2_buffer associated with this buffer; can
- * be read by the driver and relevant entries can be
- * changed by the driver in case of CAPTURE types
- * (such as timestamp)
- * @v4l2_planes: struct v4l2_planes associated with this buffer; can
- * be read by the driver and relevant entries can be
- * changed by the driver in case of CAPTURE types
- * (such as bytesused); NOTE that even for single-planar
- * types, the v4l2_planes[0] struct should be used
- * instead of v4l2_buf for filling bytesused - drivers
- * should use the vb2_set_plane_payload() function for that
* @vb2_queue: the queue to which this buffer belongs
+ * @index: id number of the buffer
+ * @type: buffer type
+ * @memory: the method by which the actual data is passed
* @num_planes: number of planes in the buffer
* on an internal driver queue
- * @state: current buffer state; do not change
- * @queued_entry: entry on the queued buffers list, which holds all
- * buffers queued from userspace
- * @done_entry: entry on the list that stores all buffers ready to
- * be dequeued to userspace
* @planes: private per-plane information; do not change
*/
struct vb2_buffer {
- struct v4l2_buffer v4l2_buf;
- struct v4l2_plane v4l2_planes[VIDEO_MAX_PLANES];
-
struct vb2_queue *vb2_queue;
-
+ unsigned int index;
+ unsigned int type;
+ unsigned int memory;
unsigned int num_planes;
-
-/* Private: internal use only */
+ struct vb2_plane planes[VB2_MAX_PLANES];
+
+ /* private: internal use only
+ *
+ * state: current buffer state; do not change
+ * queued_entry: entry on the queued buffers list, which holds
+ * all buffers queued from userspace
+ * done_entry: entry on the list that stores all buffers ready
+ * to be dequeued to userspace
+ */
enum vb2_buffer_state state;
struct list_head queued_entry;
struct list_head done_entry;
-
- struct vb2_plane planes[VIDEO_MAX_PLANES];
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
* Counters for how often these buffer-related ops are
@@ -312,7 +344,7 @@ struct vb2_buffer {
* pre-queued buffers before calling STREAMON.
*/
struct vb2_ops {
- int (*queue_setup)(struct vb2_queue *q, const struct v4l2_format *fmt,
+ int (*queue_setup)(struct vb2_queue *q, const void *parg,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[]);
@@ -330,12 +362,19 @@ struct vb2_ops {
void (*buf_queue)(struct vb2_buffer *vb);
};
-struct v4l2_fh;
+struct vb2_buf_ops {
+ int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
+ int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
+ struct vb2_plane *planes);
+ int (*set_timestamp)(struct vb2_buffer *vb, const void *pb);
+};
/**
* struct vb2_queue - a videobuf queue
*
- * @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h
+ * @type: private buffer type whose content is defined by the vb2-core
+ * caller. For example, for V4L2, it should match
+ * the V4L2_BUF_TYPE_* in include/uapi/linux/videodev2.h
* @io_modes: supported io methods (see vb2_io_modes enum)
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
@@ -351,10 +390,13 @@ struct v4l2_fh;
* drivers to easily associate an owner filehandle with the queue.
* @ops: driver-specific callbacks
* @mem_ops: memory allocator specific callbacks
+ * @buf_ops: callbacks to deliver buffer information
+ * between user-space and kernel-space
* @drv_priv: driver private data
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
- * structure type, so sizeof(struct vb2_buffer) will is used
+ * structure type. For example, sizeof(struct vb2_v4l2_buffer)
+ * will be used for V4L2.
* @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
* V4L2_BUF_FLAG_TSTAMP_SRC_*
* @gfp_flags: additional gfp flags used when allocating the buffers.
@@ -385,6 +427,8 @@ struct v4l2_fh;
* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
* buffers. Only set for capture queues if qbuf has not yet been
* called since poll() needs to return POLLERR in that situation.
+ * @is_multiplanar: set if buffer type is multiplanar
+ * @is_output: set if buffer type is output
* @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the
* last decoded buffer was already dequeued. Set for capture queues
* when a buffer with the V4L2_BUF_FLAG_LAST is dequeued.
@@ -392,17 +436,19 @@ struct v4l2_fh;
* @threadio: thread io internal data, used only if thread is active
*/
struct vb2_queue {
- enum v4l2_buf_type type;
+ unsigned int type;
unsigned int io_modes;
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
struct mutex *lock;
- struct v4l2_fh *owner;
+ void *owner;
const struct vb2_ops *ops;
const struct vb2_mem_ops *mem_ops;
+ const struct vb2_buf_ops *buf_ops;
+
void *drv_priv;
unsigned int buf_struct_size;
u32 timestamp_flags;
@@ -411,8 +457,8 @@ struct vb2_queue {
/* private: internal use only */
struct mutex mmap_lock;
- enum v4l2_memory memory;
- struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
+ unsigned int memory;
+ struct vb2_buffer *bufs[VB2_MAX_FRAME];
unsigned int num_buffers;
struct list_head queued_list;
@@ -423,13 +469,15 @@ struct vb2_queue {
spinlock_t done_lock;
wait_queue_head_t done_wq;
- void *alloc_ctx[VIDEO_MAX_PLANES];
- unsigned int plane_sizes[VIDEO_MAX_PLANES];
+ void *alloc_ctx[VB2_MAX_PLANES];
+ unsigned int plane_sizes[VB2_MAX_PLANES];
unsigned int streaming:1;
unsigned int start_streaming_called:1;
unsigned int error:1;
unsigned int waiting_for_buffers:1;
+ unsigned int is_multiplanar:1;
+ unsigned int is_output:1;
unsigned int last_buffer_dequeued:1;
struct vb2_fileio_data *fileio;
@@ -455,23 +503,25 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
void vb2_discard_done(struct vb2_queue *q);
int vb2_wait_for_all_buffers(struct vb2_queue *q);
-int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
-int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
+int vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int *count);
+int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int *count, const void *parg);
+int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking);
-int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
+int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
-int __must_check vb2_queue_init(struct vb2_queue *q);
-
-void vb2_queue_release(struct vb2_queue *q);
-void vb2_queue_error(struct vb2_queue *q);
+int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
+ unsigned int index, unsigned int plane, unsigned int flags);
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
-int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
+int vb2_core_queue_init(struct vb2_queue *q);
+void vb2_core_queue_release(struct vb2_queue *q);
-int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
-int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
+void vb2_queue_error(struct vb2_queue *q);
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
#ifndef CONFIG_MMU
@@ -481,41 +531,6 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned long pgoff,
unsigned long flags);
#endif
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
-size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
- loff_t *ppos, int nonblock);
-size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
- loff_t *ppos, int nonblock);
-
-/*
- * vb2_thread_fnc - callback function for use with vb2_thread
- *
- * This is called whenever a buffer is dequeued in the thread.
- */
-typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv);
-
-/**
- * vb2_thread_start() - start a thread for the given queue.
- * @q: videobuf queue
- * @fnc: callback function
- * @priv: priv pointer passed to the callback function
- * @thread_name:the name of the thread. This will be prefixed with "vb2-".
- *
- * This starts a thread that will queue and dequeue until an error occurs
- * or @vb2_thread_stop is called.
- *
- * This function should not be used for anything else but the videobuf2-dvb
- * support. If you think you have another good use-case for this, then please
- * contact the linux-media mailinglist first.
- */
-int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
- const char *thread_name);
-
-/**
- * vb2_thread_stop() - stop the thread for the given queue.
- * @q: videobuf queue
- */
-int vb2_thread_stop(struct vb2_queue *q);
/**
* vb2_is_streaming() - return streaming status of the queue
@@ -573,7 +588,7 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no, unsigned long size)
{
if (plane_no < vb->num_planes)
- vb->v4l2_planes[plane_no].bytesused = size;
+ vb->planes[plane_no].bytesused = size;
}
/**
@@ -585,7 +600,7 @@ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no)
{
if (plane_no < vb->num_planes)
- return vb->v4l2_planes[plane_no].bytesused;
+ return vb->planes[plane_no].bytesused;
return 0;
}
@@ -598,7 +613,7 @@ static inline unsigned long
vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
{
if (plane_no < vb->num_planes)
- return vb->v4l2_planes[plane_no].length;
+ return vb->planes[plane_no].length;
return 0;
}
@@ -620,48 +635,4 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
q->last_buffer_dequeued = false;
}
-/*
- * The following functions are not part of the vb2 core API, but are simple
- * helper functions that you can use in your struct v4l2_file_operations,
- * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock
- * or video_device->lock is set, and they will set and test vb2_queue->owner
- * to check if the calling filehandle is permitted to do the queuing operation.
- */
-
-/* struct v4l2_ioctl_ops helpers */
-
-int vb2_ioctl_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p);
-int vb2_ioctl_create_bufs(struct file *file, void *priv,
- struct v4l2_create_buffers *p);
-int vb2_ioctl_prepare_buf(struct file *file, void *priv,
- struct v4l2_buffer *p);
-int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p);
-int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
-int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p);
-int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i);
-int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i);
-int vb2_ioctl_expbuf(struct file *file, void *priv,
- struct v4l2_exportbuffer *p);
-
-/* struct v4l2_file_operations helpers */
-
-int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
-int vb2_fop_release(struct file *file);
-int _vb2_fop_release(struct file *file, struct mutex *lock);
-ssize_t vb2_fop_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos);
-ssize_t vb2_fop_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
-#ifndef CONFIG_MMU
-unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags);
-#endif
-
-/* struct vb2_ops helpers, only use if vq->lock is non-NULL. */
-
-void vb2_ops_wait_prepare(struct vb2_queue *vq);
-void vb2_ops_wait_finish(struct vb2_queue *vq);
-
#endif /* _MEDIA_VIDEOBUF2_CORE_H */
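The practical upshot for drivers: struct vb2_buffer no longer carries v4l2_buf/v4l2_planes, and per-frame V4L2 state (flags, field, timestamp, sequence, timecode) moves into the struct vb2_v4l2_buffer wrapper introduced by the new videobuf2-v4l2.h below. A sketch of a capture driver's buffer completion path after the conversion; struct my_dev and its fields are hypothetical placeholders.

static void my_frame_done(struct my_dev *dev, struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	/* was: vb->v4l2_buf.field / vb->v4l2_buf.sequence / ... */
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->sequence = dev->sequence++;
	v4l2_get_timestamp(&vbuf->timestamp);

	/* plane payloads are still set on the core buffer */
	vb2_set_plane_payload(vb, 0, dev->sizeimage);
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
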
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
index 8197f87d6c61..c33dfa69d7ab 100644
--- a/include/media/videobuf2-dma-contig.h
+++ b/include/media/videobuf2-dma-contig.h
@@ -13,7 +13,7 @@
#ifndef _MEDIA_VIDEOBUF2_DMA_CONTIG_H
#define _MEDIA_VIDEOBUF2_DMA_CONTIG_H
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <linux/dma-mapping.h>
static inline dma_addr_t
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
index 14ce3068b642..8d1083f83c3d 100644
--- a/include/media/videobuf2-dma-sg.h
+++ b/include/media/videobuf2-dma-sg.h
@@ -13,7 +13,7 @@
#ifndef _MEDIA_VIDEOBUF2_DMA_SG_H
#define _MEDIA_VIDEOBUF2_DMA_SG_H
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
static inline struct sg_table *vb2_dma_sg_plane_desc(
struct vb2_buffer *vb, unsigned int plane_no)
diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h
index 8f61456f1394..5b64c9eac2c9 100644
--- a/include/media/videobuf2-dvb.h
+++ b/include/media/videobuf2-dvb.h
@@ -6,7 +6,13 @@
#include <dvb_demux.h>
#include <dvb_net.h>
#include <dvb_frontend.h>
-#include <media/videobuf2-core.h>
+
+#include <media/videobuf2-v4l2.h>
+/*
+ * TODO: This header file should be replaced with videobuf2-core.h
+ * Currently, vb2_thread is not part of videobuf2-core,
+ * since vb2_thread has many dependencies on videobuf2-v4l2.
+ */
struct vb2_dvb {
/* filling this is the job of the driver */
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 6513c7ec3116..36565c7acb54 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -14,7 +14,7 @@
#ifndef _MEDIA_VIDEOBUF2_MEMOPS_H
#define _MEDIA_VIDEOBUF2_MEMOPS_H
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <linux/mm.h>
/**
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
new file mode 100644
index 000000000000..5abab1e7c7e8
--- /dev/null
+++ b/include/media/videobuf2-v4l2.h
@@ -0,0 +1,149 @@
+/*
+ * videobuf2-v4l2.h - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef _MEDIA_VIDEOBUF2_V4L2_H
+#define _MEDIA_VIDEOBUF2_V4L2_H
+
+#include <linux/videodev2.h>
+#include <media/videobuf2-core.h>
+
+#if VB2_MAX_FRAME != VIDEO_MAX_FRAME
+#error VB2_MAX_FRAME != VIDEO_MAX_FRAME
+#endif
+
+#if VB2_MAX_PLANES != VIDEO_MAX_PLANES
+#error VB2_MAX_PLANES != VIDEO_MAX_PLANES
+#endif
+
+/**
+ * struct vb2_v4l2_buffer - video buffer information for v4l2
+ * @vb2_buf: video buffer 2
+ * @flags: buffer informational flags
+ * @field: enum v4l2_field; field order of the image in the buffer
+ * @timestamp: frame timestamp
+ * @timecode: frame timecode
+ * @sequence: sequence count of this frame
+ * Should contain enough information to cover all the fields
+ * of struct v4l2_buffer in videodev2.h
+ */
+struct vb2_v4l2_buffer {
+ struct vb2_buffer vb2_buf;
+
+ __u32 flags;
+ __u32 field;
+ struct timeval timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+};
+
+/*
+ * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer *
+ */
+#define to_vb2_v4l2_buffer(vb) \
+ container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
+
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
+
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
+
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
+
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
+
+int __must_check vb2_queue_init(struct vb2_queue *q);
+void vb2_queue_release(struct vb2_queue *q);
+
+unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock);
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
+ loff_t *ppos, int nonblock);
+
+/*
+ * vb2_thread_fnc - callback function for use with vb2_thread
+ *
+ * This is called whenever a buffer is dequeued in the thread.
+ */
+typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv);
+
+/**
+ * vb2_thread_start() - start a thread for the given queue.
+ * @q: videobuf queue
+ * @fnc: callback function
+ * @priv: priv pointer passed to the callback function
+ * @thread_name: the name of the thread. This will be prefixed with "vb2-".
+ *
+ * This starts a thread that will queue and dequeue until an error occurs
+ * or @vb2_thread_stop is called.
+ *
+ * This function should not be used for anything else but the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailinglist first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+ const char *thread_name);
+
+/**
+ * vb2_thread_stop() - stop the thread for the given queue.
+ * @q: videobuf queue
+ */
+int vb2_thread_stop(struct vb2_queue *q);
+
+/*
+ * The following functions are not part of the vb2 core API, but are simple
+ * helper functions that you can use in your struct v4l2_file_operations,
+ * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock
+ * or video_device->lock is set, and they will set and test vb2_queue->owner
+ * to check if the calling filehandle is permitted to do the queuing operation.
+ */
+
+/* struct v4l2_ioctl_ops helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p);
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p);
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *p);
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p);
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p);
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i);
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i);
+int vb2_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *p);
+
+/* struct v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
+int vb2_fop_release(struct file *file);
+int _vb2_fop_release(struct file *file, struct mutex *lock);
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos);
+unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
+#endif
+
+/* struct vb2_ops helpers, only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq);
+void vb2_ops_wait_finish(struct vb2_queue *vq);
+
+#endif /* _MEDIA_VIDEOBUF2_V4L2_H */
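Drivers that used videobuf2-core.h for the V4L2 entry points now include this header instead; the vb2_ioctl_*() and vb2_fop_*() helpers plug into struct v4l2_ioctl_ops and struct v4l2_file_operations exactly as before, only their home changed. A sketch of the typical queue setup with a driver buffer that embeds struct vb2_v4l2_buffer and sets buf_struct_size accordingly (struct my_buffer, struct my_dev and my_vb2_ops are hypothetical):

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

struct my_buffer {
	struct vb2_v4l2_buffer vb;	/* must come first */
	struct list_head list;
};

static int my_queue_init(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;

	return vb2_queue_init(q);
}
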
diff --git a/include/media/videobuf2-vmalloc.h b/include/media/videobuf2-vmalloc.h
index 93a76b43038d..a63fe662140a 100644
--- a/include/media/videobuf2-vmalloc.h
+++ b/include/media/videobuf2-vmalloc.h
@@ -13,7 +13,7 @@
#ifndef _MEDIA_VIDEOBUF2_VMALLOC_H
#define _MEDIA_VIDEOBUF2_VMALLOC_H
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
extern const struct vb2_mem_ops vb2_vmalloc_memops;
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index a01946514b5a..00b4a6308249 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -514,6 +514,34 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->ret)
);
+TRACE_EVENT(f2fs_background_gc,
+
+ TP_PROTO(struct super_block *sb, long wait_ms,
+ unsigned int prefree, unsigned int free),
+
+ TP_ARGS(sb, wait_ms, prefree, free),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(long, wait_ms)
+ __field(unsigned int, prefree)
+ __field(unsigned int, free)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->wait_ms = wait_ms;
+ __entry->prefree = prefree;
+ __entry->free = free;
+ ),
+
+ TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u",
+ show_dev(__entry),
+ __entry->wait_ms,
+ __entry->prefree,
+ __entry->free)
+);
+
TRACE_EVENT(f2fs_get_victim,
TP_PROTO(struct super_block *sb, int type, int gc_type,
@@ -1000,6 +1028,32 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_sync)
);
+TRACE_EVENT(f2fs_readpages,
+
+ TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage),
+
+ TP_ARGS(inode, page, nrpage),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, start)
+ __field(unsigned int, nrpage)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start = page->index;
+ __entry->nrpage = nrpage;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, start = %lu nrpage = %u",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->start,
+ __entry->nrpage)
+);
+
TRACE_EVENT(f2fs_write_checkpoint,
TP_PROTO(struct super_block *sb, int reason, char *msg),
@@ -1132,17 +1186,19 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
__entry->len)
);
-TRACE_EVENT(f2fs_update_extent_tree,
+TRACE_EVENT(f2fs_update_extent_tree_range,
- TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr),
+ TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
+ unsigned int len),
- TP_ARGS(inode, pgofs, blkaddr),
+ TP_ARGS(inode, pgofs, blkaddr, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(unsigned int, pgofs)
__field(u32, blk)
+ __field(unsigned int, len)
),
TP_fast_assign(
@@ -1150,12 +1206,15 @@ TRACE_EVENT(f2fs_update_extent_tree,
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
__entry->blk = blkaddr;
+ __entry->len = len;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "blkaddr = %u, len = %u",
show_dev_ino(__entry),
__entry->pgofs,
- __entry->blk)
+ __entry->blk,
+ __entry->len)
);
TRACE_EVENT(f2fs_shrink_extent_tree,
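As with the existing f2fs events, the new tracepoints are emitted from fs/f2fs through the trace_<name>() stubs that TRACE_EVENT() generates. A sketch of the expected call sites; the surrounding variable names are assumptions, not quotes from fs/f2fs:

/* readahead path, before submitting the batch of pages */
trace_f2fs_readpages(inode, page, nr_pages);

/* background GC thread, once per wake-up */
trace_f2fs_background_gc(sbi->sb, wait_ms,
			 prefree_segments(sbi), free_segments(sbi));
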
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index a0d008070962..c72f2dc01d0b 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -81,15 +81,47 @@ DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, st
DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, generic_add_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
- TP_ARGS(inode, fl));
-
DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
+TRACE_EVENT(generic_add_lease,
+ TP_PROTO(struct inode *inode, struct file_lock *fl),
+
+ TP_ARGS(inode, fl),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(int, wcount)
+ __field(int, dcount)
+ __field(int, icount)
+ __field(dev_t, s_dev)
+ __field(fl_owner_t, fl_owner)
+ __field(unsigned int, fl_flags)
+ __field(unsigned char, fl_type)
+ ),
+
+ TP_fast_assign(
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->i_ino = inode->i_ino;
+ __entry->wcount = atomic_read(&inode->i_writecount);
+ __entry->dcount = d_count(fl->fl_file->f_path.dentry);
+ __entry->icount = atomic_read(&inode->i_count);
+ __entry->fl_owner = fl ? fl->fl_owner : NULL;
+ __entry->fl_flags = fl ? fl->fl_flags : 0;
+ __entry->fl_type = fl ? fl->fl_type : 0;
+ ),
+
+ TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d dcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
+ __entry->i_ino, __entry->wcount, __entry->dcount,
+ __entry->icount, __entry->fl_owner,
+ show_fl_flags(__entry->fl_flags),
+ show_fl_type(__entry->fl_type))
+);
+
#endif /* _TRACE_FILELOCK_H */
/* This part must be outside protection */
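Promoting generic_add_lease from a DEFINE_EVENT of the filelock_lease class to a standalone TRACE_EVENT changes only what gets recorded (inode counters plus the lock owner, flags and type); the call site in fs/locks.c remains the generated stub, roughly:

/* in generic_add_lease(), once the new lease has been installed */
trace_generic_add_lease(inode, lease);
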
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index dbf017bfddd9..22afa26e34b2 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -5,6 +5,7 @@
#define _TRACE_V4L2_H
#include <linux/tracepoint.h>
+#include <media/videobuf2-v4l2.h>
/* Enums require being exported to userspace, for user tool parsing */
#undef EM
@@ -27,6 +28,7 @@
EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \
EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_SDR_OUTPUT, "SDR_OUTPUT" ) \
EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
SHOW_TYPE
@@ -174,17 +176,12 @@ DEFINE_EVENT(v4l2_event_class, v4l2_qbuf,
TP_ARGS(minor, buf)
);
-DECLARE_EVENT_CLASS(vb2_event_class,
+DECLARE_EVENT_CLASS(vb2_v4l2_event_class,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb),
TP_STRUCT__entry(
__field(int, minor)
- __field(u32, queued_count)
- __field(int, owned_by_drv_count)
- __field(u32, index)
- __field(u32, type)
- __field(u32, bytesused)
__field(u32, flags)
__field(u32, field)
__field(s64, timestamp)
@@ -202,38 +199,30 @@ DECLARE_EVENT_CLASS(vb2_event_class,
),
TP_fast_assign(
- __entry->minor = q->owner ? q->owner->vdev->minor : -1;
- __entry->queued_count = q->queued_count;
- __entry->owned_by_drv_count =
- atomic_read(&q->owned_by_drv_count);
- __entry->index = vb->v4l2_buf.index;
- __entry->type = vb->v4l2_buf.type;
- __entry->bytesused = vb->v4l2_planes[0].bytesused;
- __entry->flags = vb->v4l2_buf.flags;
- __entry->field = vb->v4l2_buf.field;
- __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
- __entry->timecode_type = vb->v4l2_buf.timecode.type;
- __entry->timecode_flags = vb->v4l2_buf.timecode.flags;
- __entry->timecode_frames = vb->v4l2_buf.timecode.frames;
- __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
- __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
- __entry->timecode_hours = vb->v4l2_buf.timecode.hours;
- __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
- __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
- __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
- __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
- __entry->sequence = vb->v4l2_buf.sequence;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct v4l2_fh *owner = q->owner;
+
+ __entry->minor = owner ? owner->vdev->minor : -1;
+ __entry->flags = vbuf->flags;
+ __entry->field = vbuf->field;
+ __entry->timestamp = timeval_to_ns(&vbuf->timestamp);
+ __entry->timecode_type = vbuf->timecode.type;
+ __entry->timecode_flags = vbuf->timecode.flags;
+ __entry->timecode_frames = vbuf->timecode.frames;
+ __entry->timecode_seconds = vbuf->timecode.seconds;
+ __entry->timecode_minutes = vbuf->timecode.minutes;
+ __entry->timecode_hours = vbuf->timecode.hours;
+ __entry->timecode_userbits0 = vbuf->timecode.userbits[0];
+ __entry->timecode_userbits1 = vbuf->timecode.userbits[1];
+ __entry->timecode_userbits2 = vbuf->timecode.userbits[2];
+ __entry->timecode_userbits3 = vbuf->timecode.userbits[3];
+ __entry->sequence = vbuf->sequence;
),
- TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "
- "type = %s, bytesused = %u, flags = %s, field = %s, "
+ TP_printk("minor=%d flags = %s, field = %s, "
"timestamp = %llu, timecode = { type = %s, flags = %s, "
"frames = %u, seconds = %u, minutes = %u, hours = %u, "
"userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
- __entry->queued_count,
- __entry->owned_by_drv_count,
- __entry->index, show_type(__entry->type),
- __entry->bytesused,
show_flags(__entry->flags),
show_field(__entry->field),
__entry->timestamp,
@@ -251,22 +240,22 @@ DECLARE_EVENT_CLASS(vb2_event_class,
)
)
-DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
-DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
-DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
-DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
diff --git a/include/trace/events/vb2.h b/include/trace/events/vb2.h
new file mode 100644
index 000000000000..bfeceeba3744
--- /dev/null
+++ b/include/trace/events/vb2.h
@@ -0,0 +1,65 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vb2
+
+#if !defined(_TRACE_VB2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VB2_H
+
+#include <linux/tracepoint.h>
+#include <media/videobuf2-core.h>
+
+DECLARE_EVENT_CLASS(vb2_event_class,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb),
+
+ TP_STRUCT__entry(
+ __field(void *, owner)
+ __field(u32, queued_count)
+ __field(int, owned_by_drv_count)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u32, bytesused)
+ ),
+
+ TP_fast_assign(
+ __entry->owner = q->owner;
+ __entry->queued_count = q->queued_count;
+ __entry->owned_by_drv_count =
+ atomic_read(&q->owned_by_drv_count);
+ __entry->index = vb->index;
+ __entry->type = vb->type;
+ __entry->bytesused = vb->planes[0].bytesused;
+ ),
+
+ TP_printk("owner = %p, queued = %u, owned_by_drv = %d, index = %u, "
+ "type = %u, bytesused = %u", __entry->owner,
+ __entry->queued_count,
+ __entry->owned_by_drv_count,
+ __entry->index, __entry->type,
+ __entry->bytesused
+ )
+)
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+#endif /* if !defined(_TRACE_VB2_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
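These events let the vb2 core trace buffer flow without touching any V4L2 structure; the generated stubs take the queue and the core buffer directly. A sketch of where videobuf2-core.c would emit them (the exact placement is up to the core, this is illustrative only):

/* on QBUF processing in the core */
trace_vb2_qbuf(q, vb);

/* when a driver completes a buffer via vb2_buffer_done() */
trace_vb2_buf_done(q, vb);
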
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a9256f0331ae..03f3618612aa 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -183,6 +183,7 @@ struct kvm_s390_skeys {
#define KVM_EXIT_EPR 23
#define KVM_EXIT_SYSTEM_EVENT 24
#define KVM_EXIT_S390_STSI 25
+#define KVM_EXIT_IOAPIC_EOI 26
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -333,6 +334,10 @@ struct kvm_run {
__u8 sel1;
__u16 sel2;
} s390_stsi;
+ /* KVM_EXIT_IOAPIC_EOI */
+ struct {
+ __u8 vector;
+ } eoi;
/* Fix the size of the union. */
char padding[256];
};
@@ -824,6 +829,8 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_MULTI_ADDRESS_SPACE 118
#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
+#define KVM_CAP_SPLIT_IRQCHIP 121
+#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
#ifdef KVM_CAP_IRQ_ROUTING
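With KVM_CAP_SPLIT_IRQCHIP the IOAPIC is modeled in userspace, so EOIs of level-triggered interrupts have to be forwarded out of the kernel; that is what the new exit reason carries. A minimal sketch of a VMM run loop consuming it; handle_ioapic_eoi() is a hypothetical userspace helper.

#include <linux/kvm.h>

/* run is the mmap'ed struct kvm_run of the vcpu fd, after KVM_RUN returns */
switch (run->exit_reason) {
case KVM_EXIT_IOAPIC_EOI:
	/* the guest EOI'd this vector on the userspace IOAPIC model */
	handle_ioapic_eoi(run->eoi.vector);
	break;
default:
	/* other exit reasons */
	break;
}
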
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index d448c536b49d..1bdce501ad6b 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -936,6 +936,7 @@ enum v4l2_deemphasis {
#define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 11)
#define V4L2_CID_RF_TUNER_BANDWIDTH (V4L2_CID_RF_TUNER_CLASS_BASE + 12)
+#define V4L2_CID_RF_TUNER_RF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 32)
#define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 41)
#define V4L2_CID_RF_TUNER_LNA_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 42)
#define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 51)
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 3228fbebcd63..a0e87d16b726 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -145,6 +145,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
+ V4L2_BUF_TYPE_SDR_OUTPUT = 12,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -159,16 +160,20 @@ enum v4l2_buf_type {
|| (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
- || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
+ || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_SDR_OUTPUT)
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
V4L2_TUNER_DIGITAL_TV = 3,
- V4L2_TUNER_ADC = 4,
+ V4L2_TUNER_SDR = 4,
V4L2_TUNER_RF = 5,
};
+/* Deprecated, do not use */
+#define V4L2_TUNER_ADC V4L2_TUNER_SDR
+
enum v4l2_memory {
V4L2_MEMORY_MMAP = 1,
V4L2_MEMORY_USERPTR = 2,
@@ -229,6 +234,9 @@ enum v4l2_colorspace {
/* Raw colorspace: for RAW unprocessed images */
V4L2_COLORSPACE_RAW = 11,
+
+ /* DCI-P3 colorspace, used by cinema projectors */
+ V4L2_COLORSPACE_DCI_P3 = 12,
};
/*
@@ -256,6 +264,8 @@ enum v4l2_xfer_func {
* V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
*
* V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE
+ *
+ * V4L2_COLORSPACE_DCI_P3: V4L2_XFER_FUNC_DCI_P3
*/
V4L2_XFER_FUNC_DEFAULT = 0,
V4L2_XFER_FUNC_709 = 1,
@@ -263,6 +273,8 @@ enum v4l2_xfer_func {
V4L2_XFER_FUNC_ADOBERGB = 3,
V4L2_XFER_FUNC_SMPTE240M = 4,
V4L2_XFER_FUNC_NONE = 5,
+ V4L2_XFER_FUNC_DCI_P3 = 6,
+ V4L2_XFER_FUNC_SMPTE2084 = 7,
};
/*
@@ -272,9 +284,10 @@ enum v4l2_xfer_func {
#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
- ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
- ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
- V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709))))
+ ((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
+ ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
+ ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
+ V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709)))))
enum v4l2_ycbcr_encoding {
/*
@@ -285,7 +298,7 @@ enum v4l2_ycbcr_encoding {
* V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and
* V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
*
- * V4L2_COLORSPACE_REC709: V4L2_YCBCR_ENC_709
+ * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
*
* V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC
*
@@ -325,7 +338,8 @@ enum v4l2_ycbcr_encoding {
* This depends on the colorspace.
*/
#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \
- ((colsp) == V4L2_COLORSPACE_REC709 ? V4L2_YCBCR_ENC_709 : \
+ (((colsp) == V4L2_COLORSPACE_REC709 || \
+ (colsp) == V4L2_COLORSPACE_DCI_P3) ? V4L2_YCBCR_ENC_709 : \
((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \
((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \
V4L2_YCBCR_ENC_601)))
@@ -423,6 +437,7 @@ struct v4l2_capability {
#define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */
#define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */
+#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
@@ -1578,7 +1593,8 @@ struct v4l2_modulator {
__u32 rangelow;
__u32 rangehigh;
__u32 txsubchans;
- __u32 reserved[4];
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 reserved[3];
};
/* Flags for the 'capability' field */
@@ -2271,7 +2287,7 @@ struct v4l2_create_buffers {
#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl)
/* Reminder: when adding new ioctls please add support for them to
- drivers/media/video/v4l2-compat-ioctl32.c as well! */
+ drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
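Two user-visible effects of these uapi additions, sketched from an application's point of view: SDR transmitters advertise the new capability bit, and the default-mapping macros now resolve sensible defaults for DCI-P3. The fd and most error handling are elided for brevity.

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Is this an SDR output (transmitter) device? */
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
    (cap.device_caps & V4L2_CAP_SDR_OUTPUT)) {
	/* negotiate formats using V4L2_BUF_TYPE_SDR_OUTPUT */
}

/* Defaults derived for the new colorspace: */
enum v4l2_xfer_func xf =
	V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_DCI_P3);	/* DCI_P3 */
enum v4l2_ycbcr_encoding enc =
	V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_DCI_P3);	/* ENC_709 */
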
diff --git a/kernel/.gitignore b/kernel/.gitignore
index 790d83c7d160..b3097bde4e9c 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,4 +5,3 @@ config_data.h
config_data.gz
timeconst.h
hz.bc
-x509_certificate_list
diff --git a/kernel/audit.c b/kernel/audit.c
index 662c007635fb..8a056a32ded7 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -407,16 +407,33 @@ static void audit_printk_skb(struct sk_buff *skb)
static void kauditd_send_skb(struct sk_buff *skb)
{
int err;
+ int attempts = 0;
+#define AUDITD_RETRIES 5
+
+restart:
/* take a reference in case we can't send it and we want to hold it */
skb_get(skb);
err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
if (err < 0) {
- BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
+ pr_err("netlink_unicast sending to audit_pid=%d returned error: %d\n",
+ audit_pid, err);
if (audit_pid) {
- pr_err("*NO* daemon at audit_pid=%d\n", audit_pid);
- audit_log_lost("auditd disappeared");
- audit_pid = 0;
- audit_sock = NULL;
+ if (err == -ECONNREFUSED || err == -EPERM
+ || ++attempts >= AUDITD_RETRIES) {
+ char s[32];
+
+ snprintf(s, sizeof(s), "audit_pid=%d reset", audit_pid);
+ audit_log_lost(s);
+ audit_pid = 0;
+ audit_sock = NULL;
+ } else {
+ pr_warn("re-scheduling(#%d) write to audit_pid=%d\n",
+ attempts, audit_pid);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ goto restart;
+ }
}
/* we might get lucky and get this in the next auditd */
audit_hold_skb(skb);
@@ -684,25 +701,22 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
return err;
}
-static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
+static void audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
{
- int rc = 0;
uid_t uid = from_kuid(&init_user_ns, current_uid());
pid_t pid = task_tgid_nr(current);
if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
*ab = NULL;
- return rc;
+ return;
}
*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
if (unlikely(!*ab))
- return rc;
+ return;
audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
audit_log_session_info(*ab);
audit_log_task_context(*ab);
-
- return rc;
}
int is_audit_feature_set(int i)
@@ -1566,14 +1580,14 @@ void audit_log_n_string(struct audit_buffer *ab, const char *string,
* @string: string to be checked
* @len: max length of the string to check
*/
-int audit_string_contains_control(const char *string, size_t len)
+bool audit_string_contains_control(const char *string, size_t len)
{
const unsigned char *p;
for (p = string; p < (const unsigned char *)string + len; p++) {
if (*p == '"' || *p < 0x21 || *p > 0x7e)
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
diff --git a/kernel/audit.h b/kernel/audit.h
index dadf86a0e59e..de6cbb7cf547 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -301,7 +301,7 @@ extern int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark
#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
extern void audit_put_chunk(struct audit_chunk *);
-extern int audit_tree_match(struct audit_chunk *, struct audit_tree *);
+extern bool audit_tree_match(struct audit_chunk *, struct audit_tree *);
extern int audit_make_tree(struct audit_krule *, char *, u32);
extern int audit_add_tree_rule(struct audit_krule *);
extern int audit_remove_tree_rule(struct audit_krule *);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 94ecdabda8e6..5efe9b299a12 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -197,13 +197,13 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
return NULL;
}
-int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
+bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
int n;
for (n = 0; n < chunk->count; n++)
if (chunk->owners[n].owner == tree)
- return 1;
- return 0;
+ return true;
+ return false;
}
/* tagging and untagging inodes with trees */
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 7714d93edb85..b8ff9e193753 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -39,13 +39,13 @@
* Locking model:
*
* audit_filter_mutex:
- * Synchronizes writes and blocking reads of audit's filterlist
- * data. Rcu is used to traverse the filterlist and access
- * contents of structs audit_entry, audit_watch and opaque
- * LSM rules during filtering. If modified, these structures
- * must be copied and replace their counterparts in the filterlist.
- * An audit_parent struct is not accessed during filtering, so may
- * be written directly provided audit_filter_mutex is held.
+ * Synchronizes writes and blocking reads of audit's filterlist
+ * data. Rcu is used to traverse the filterlist and access
+ * contents of structs audit_entry, audit_watch and opaque
+ * LSM rules during filtering. If modified, these structures
+ * must be copied and replace their counterparts in the filterlist.
+ * An audit_parent struct is not accessed during filtering, so may
+ * be written directly provided audit_filter_mutex is held.
*/
/* Audit filter lists, defined in <linux/audit.h> */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2c9eae6ad970..b9d0cce3f9ce 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -45,7 +45,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/rwsem.h>
+#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
@@ -75,7 +75,7 @@
* cgroup_mutex is the master lock. Any modification to cgroup or its
* hierarchy must be performed while holding it.
*
- * css_set_rwsem protects task->cgroups pointer, the list of css_set
+ * css_set_lock protects task->cgroups pointer, the list of css_set
* objects, and the chain of tasks off each css_set.
*
* These locks are exported if CONFIG_PROVE_RCU so that accessors in
@@ -83,12 +83,12 @@
*/
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
-DECLARE_RWSEM(css_set_rwsem);
+DEFINE_SPINLOCK(css_set_lock);
EXPORT_SYMBOL_GPL(cgroup_mutex);
-EXPORT_SYMBOL_GPL(css_set_rwsem);
+EXPORT_SYMBOL_GPL(css_set_lock);
#else
static DEFINE_MUTEX(cgroup_mutex);
-static DECLARE_RWSEM(css_set_rwsem);
+static DEFINE_SPINLOCK(css_set_lock);
#endif
/*
@@ -103,6 +103,8 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
*/
static DEFINE_SPINLOCK(release_agent_path_lock);
+struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
#define cgroup_assert_mutex_or_rcu_locked() \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&cgroup_mutex), \
@@ -136,6 +138,27 @@ static const char *cgroup_subsys_name[] = {
};
#undef SUBSYS
+/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
+#define SUBSYS(_x) \
+ DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \
+ DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \
+ EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \
+ EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
+static struct static_key_true *cgroup_subsys_enabled_key[] = {
+#include <linux/cgroup_subsys.h>
+};
+#undef SUBSYS
+
+#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
+static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
+#include <linux/cgroup_subsys.h>
+};
+#undef SUBSYS
+
/*
* The default hierarchy, reserved for the subsystems that are otherwise
* unattached - it never has more than a single cgroup, and all tasks are
@@ -150,12 +173,6 @@ EXPORT_SYMBOL_GPL(cgrp_dfl_root);
*/
static bool cgrp_dfl_root_visible;
-/*
- * Set by the boot param of the same name and makes subsystems with NULL
- * ->dfl_files to use ->legacy_files on the default hierarchy.
- */
-static bool cgroup_legacy_files_on_dfl;
-
/* some controllers are not supported in the default hierarchy */
static unsigned long cgrp_dfl_root_inhibit_ss_mask;
@@ -183,6 +200,7 @@ static u64 css_serial_nr_next = 1;
*/
static unsigned long have_fork_callback __read_mostly;
static unsigned long have_exit_callback __read_mostly;
+static unsigned long have_free_callback __read_mostly;
/* Ditto for the can_fork callback. */
static unsigned long have_canfork_callback __read_mostly;
@@ -192,14 +210,87 @@ static struct cftype cgroup_legacy_base_files[];
static int rebind_subsystems(struct cgroup_root *dst_root,
unsigned long ss_mask);
+static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+static int cgroup_addrm_files(struct cgroup_subsys_state *css,
+ struct cgroup *cgrp, struct cftype cfts[],
bool is_add);
+/**
+ * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
+ * @ssid: subsys ID of interest
+ *
+ * cgroup_subsys_enabled() can only be used with literal subsys names which
+ * is fine for individual subsystems but unsuitable for cgroup core. This
+ * is a slower static_key_enabled() based test indexed by @ssid.
+ */
+static bool cgroup_ssid_enabled(int ssid)
+{
+ return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
+}
+
+/**
+ * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
+ * @cgrp: the cgroup of interest
+ *
+ * The default hierarchy is the v2 interface of cgroup and this function
+ * can be used to test whether a cgroup is on the default hierarchy for
+ * cases where a subsystem should behave differently depending on the
+ * interface version.
+ *
+ * The set of behaviors which change on the default hierarchy are still
+ * being determined and the mount option is prefixed with __DEVEL__.
+ *
+ * List of changed behaviors:
+ *
+ * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
+ * and "name" are disallowed.
+ *
+ * - When mounting an existing superblock, mount options should match.
+ *
+ * - Remount is disallowed.
+ *
+ * - rename(2) is disallowed.
+ *
+ * - "tasks" is removed. Everything should be at process granularity. Use
+ * "cgroup.procs" instead.
+ *
+ * - "cgroup.procs" is not sorted. pids will be unique unless they got
+ * recycled inbetween reads.
+ *
+ * - "release_agent" and "notify_on_release" are removed. Replacement
+ * notification mechanism will be implemented.
+ *
+ * - "cgroup.clone_children" is removed.
+ *
+ * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
+ * and its descendants contain no task; otherwise, 1. The file also
+ * generates a kernfs notification which can be monitored through poll and
+ * [di]notify when the value of the file changes.
+ *
+ * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
+ * take masks of ancestors with non-empty cpus/mems, instead of being
+ * moved to an ancestor.
+ *
+ * - cpuset: a task can be moved into an empty cpuset, and again it takes
+ * masks of ancestors.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for the flag
+ * is not created.
+ *
+ * - blkcg: blk-throttle becomes properly hierarchical.
+ *
+ * - debug: disallowed on the default hierarchy.
+ */
+static bool cgroup_on_dfl(const struct cgroup *cgrp)
+{
+ return cgrp->root == &cgrp_dfl_root;
+}
+
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
gfp_t gfp_mask)
@@ -332,6 +423,22 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp)
return !(cgrp->self.flags & CSS_ONLINE);
}
+static void cgroup_get(struct cgroup *cgrp)
+{
+ WARN_ON_ONCE(cgroup_is_dead(cgrp));
+ css_get(&cgrp->self);
+}
+
+static bool cgroup_tryget(struct cgroup *cgrp)
+{
+ return css_tryget(&cgrp->self);
+}
+
+static void cgroup_put(struct cgroup *cgrp)
+{
+ css_put(&cgrp->self);
+}
+
struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
struct cgroup *cgrp = of->kn->parent->priv;
@@ -481,19 +588,31 @@ struct css_set init_css_set = {
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
.mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
.mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
+ .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
};
static int css_set_count = 1; /* 1 for init_css_set */
/**
+ * css_set_populated - does a css_set contain any tasks?
+ * @cset: target css_set
+ */
+static bool css_set_populated(struct css_set *cset)
+{
+ lockdep_assert_held(&css_set_lock);
+
+ return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
+}
+
+/**
* cgroup_update_populated - updated populated count of a cgroup
* @cgrp: the target cgroup
* @populated: inc or dec populated count
*
- * @cgrp is either getting the first task (css_set) or losing the last.
- * Update @cgrp->populated_cnt accordingly. The count is propagated
- * towards root so that a given cgroup's populated_cnt is zero iff the
- * cgroup and all its descendants are empty.
+ * One of the css_sets associated with @cgrp is either getting its first
+ * task or losing the last. Update @cgrp->populated_cnt accordingly. The
+ * count is propagated towards root so that a given cgroup's populated_cnt
+ * is zero iff the cgroup and all its descendants don't contain any tasks.
*
* @cgrp's interface file "cgroup.populated" is zero if
* @cgrp->populated_cnt is zero and 1 otherwise. When @cgrp->populated_cnt
@@ -503,7 +622,7 @@ static int css_set_count = 1; /* 1 for init_css_set */
*/
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
- lockdep_assert_held(&css_set_rwsem);
+ lockdep_assert_held(&css_set_lock);
do {
bool trigger;
@@ -516,12 +635,93 @@ static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
if (!trigger)
break;
- if (cgrp->populated_kn)
- kernfs_notify(cgrp->populated_kn);
+ check_for_release(cgrp);
+ cgroup_file_notify(&cgrp->events_file);
+
cgrp = cgroup_parent(cgrp);
} while (cgrp);
}
+/**
+ * css_set_update_populated - update populated state of a css_set
+ * @cset: target css_set
+ * @populated: whether @cset is populated or depopulated
+ *
+ * @cset is either getting the first task or losing the last. Update the
+ * ->populated_cnt of all associated cgroups accordingly.
+ */
+static void css_set_update_populated(struct css_set *cset, bool populated)
+{
+ struct cgrp_cset_link *link;
+
+ lockdep_assert_held(&css_set_lock);
+
+ list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
+ cgroup_update_populated(link->cgrp, populated);
+}
+
+/**
+ * css_set_move_task - move a task from one css_set to another
+ * @task: task being moved
+ * @from_cset: css_set @task currently belongs to (may be NULL)
+ * @to_cset: new css_set @task is being moved to (may be NULL)
+ * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
+ *
+ * Move @task from @from_cset to @to_cset. If @task didn't belong to any
+ * css_set, @from_cset can be NULL. If @task is being disassociated
+ * instead of moved, @to_cset can be NULL.
+ *
+ * This function automatically handles populated_cnt updates and
+ * css_task_iter adjustments but the caller is responsible for managing
+ * @from_cset and @to_cset's reference counts.
+ */
+static void css_set_move_task(struct task_struct *task,
+ struct css_set *from_cset, struct css_set *to_cset,
+ bool use_mg_tasks)
+{
+ lockdep_assert_held(&css_set_lock);
+
+ if (from_cset) {
+ struct css_task_iter *it, *pos;
+
+ WARN_ON_ONCE(list_empty(&task->cg_list));
+
+ /*
+ * @task is leaving, advance task iterators which are
+ * pointing to it so that they can resume at the next
+ * position. Advancing an iterator might remove it from
+ * the list, use safe walk. See css_task_iter_advance*()
+ * for details.
+ */
+ list_for_each_entry_safe(it, pos, &from_cset->task_iters,
+ iters_node)
+ if (it->task_pos == &task->cg_list)
+ css_task_iter_advance(it);
+
+ list_del_init(&task->cg_list);
+ if (!css_set_populated(from_cset))
+ css_set_update_populated(from_cset, false);
+ } else {
+ WARN_ON_ONCE(!list_empty(&task->cg_list));
+ }
+
+ if (to_cset) {
+ /*
+ * We are synchronized through cgroup_threadgroup_rwsem
+ * against PF_EXITING setting such that we can't race
+ * against cgroup_exit() changing the css_set to
+ * init_css_set and dropping the old one.
+ */
+ WARN_ON_ONCE(task->flags & PF_EXITING);
+
+ if (!css_set_populated(to_cset))
+ css_set_update_populated(to_cset, true);
+ rcu_assign_pointer(task->cgroups, to_cset);
+ list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
+ &to_cset->tasks);
+ }
+}
+
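For orientation, a sketch (not part of the patch) of the calling convention this helper expects; the commit step added to cgroup_taskset_migrate() further down does exactly this while holding css_set_lock:

	/* illustrative only; mirrors the commit loop in cgroup_taskset_migrate() */
	static void example_move_one_task(struct task_struct *task,
					  struct css_set *to_cset)
	{
		struct css_set *from_cset;

		spin_lock_bh(&css_set_lock);
		from_cset = task_css_set(task);		/* current association */
		get_css_set(to_cset);			/* css_set_move_task() takes no refs */
		css_set_move_task(task, from_cset, to_cset, true);
		put_css_set_locked(from_cset);		/* drop the ref @task was holding */
		spin_unlock_bh(&css_set_lock);
	}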
/*
* hash table for cgroup groups. This improves the performance to find
* an existing css_set. This hash doesn't (currently) take into
@@ -549,7 +749,7 @@ static void put_css_set_locked(struct css_set *cset)
struct cgroup_subsys *ss;
int ssid;
- lockdep_assert_held(&css_set_rwsem);
+ lockdep_assert_held(&css_set_lock);
if (!atomic_dec_and_test(&cset->refcount))
return;
@@ -561,17 +761,10 @@ static void put_css_set_locked(struct css_set *cset)
css_set_count--;
list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
- struct cgroup *cgrp = link->cgrp;
-
list_del(&link->cset_link);
list_del(&link->cgrp_link);
-
- /* @cgrp can't go away while we're holding css_set_rwsem */
- if (list_empty(&cgrp->cset_links)) {
- cgroup_update_populated(cgrp, false);
- check_for_release(cgrp);
- }
-
+ if (cgroup_parent(link->cgrp))
+ cgroup_put(link->cgrp);
kfree(link);
}
@@ -588,9 +781,9 @@ static void put_css_set(struct css_set *cset)
if (atomic_add_unless(&cset->refcount, -1, 1))
return;
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
put_css_set_locked(cset);
- up_write(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
}
/*
@@ -779,15 +972,15 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
link->cset = cset;
link->cgrp = cgrp;
- if (list_empty(&cgrp->cset_links))
- cgroup_update_populated(cgrp, true);
- list_move(&link->cset_link, &cgrp->cset_links);
-
/*
- * Always add links to the tail of the list so that the list
- * is sorted by order of hierarchy creation
+ * Always add links to the tail of the lists so that the lists are
+ * in chronological order.
*/
+ list_move_tail(&link->cset_link, &cgrp->cset_links);
list_add_tail(&link->cgrp_link, &cset->cgrp_links);
+
+ if (cgroup_parent(cgrp))
+ cgroup_get(cgrp);
}
/**
@@ -813,11 +1006,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
/* First see if we already have a cgroup group that matches
* the desired set */
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
cset = find_existing_css_set(old_cset, cgrp, template);
if (cset)
get_css_set(cset);
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
if (cset)
return cset;
@@ -838,13 +1031,14 @@ static struct css_set *find_css_set(struct css_set *old_cset,
INIT_LIST_HEAD(&cset->mg_tasks);
INIT_LIST_HEAD(&cset->mg_preload_node);
INIT_LIST_HEAD(&cset->mg_node);
+ INIT_LIST_HEAD(&cset->task_iters);
INIT_HLIST_NODE(&cset->hlist);
/* Copy the set of subsystem state objects generated in
* find_existing_css_set() */
memcpy(cset->subsys, template, sizeof(cset->subsys));
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
/* Add reference counts and links from the new css_set. */
list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
@@ -866,53 +1060,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
list_add_tail(&cset->e_cset_node[ssid],
&cset->subsys[ssid]->cgroup->e_csets[ssid]);
- up_write(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
return cset;
}
-void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
- down_read(&tsk->signal->group_rwsem);
-}
-
-void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
- up_read(&tsk->signal->group_rwsem);
-}
-
-/**
- * threadgroup_lock - lock threadgroup
- * @tsk: member task of the threadgroup to lock
- *
- * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
- * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid. This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
- *
- * fork and exit explicitly call threadgroup_change_{begin|end}() for
- * synchronization. While held, no new task will be added to threadgroup
- * and no existing live task will have its PF_EXITING set.
- *
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
- */
-static void threadgroup_lock(struct task_struct *tsk)
-{
- down_write(&tsk->signal->group_rwsem);
-}
-
-/**
- * threadgroup_unlock - unlock threadgroup
- * @tsk: member task of the threadgroup to unlock
- *
- * Reverse threadgroup_lock().
- */
-static inline void threadgroup_unlock(struct task_struct *tsk)
-{
- up_write(&tsk->signal->group_rwsem);
-}
-
static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -972,14 +1124,15 @@ static void cgroup_destroy_root(struct cgroup_root *root)
* Release all the links from cset_links to this hierarchy's
* root cgroup
*/
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
list_del(&link->cset_link);
list_del(&link->cgrp_link);
kfree(link);
}
- up_write(&css_set_rwsem);
+
+ spin_unlock_bh(&css_set_lock);
if (!list_empty(&root->root_list)) {
list_del(&root->root_list);
@@ -1001,7 +1154,7 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
struct cgroup *res = NULL;
lockdep_assert_held(&cgroup_mutex);
- lockdep_assert_held(&css_set_rwsem);
+ lockdep_assert_held(&css_set_lock);
if (cset == &init_css_set) {
res = &root->cgrp;
@@ -1024,7 +1177,7 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
/*
* Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex and css_set_rwsem held.
+ * called with cgroup_mutex and css_set_lock held.
*/
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root)
@@ -1063,7 +1216,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
* update of a tasks cgroup pointer by cgroup_attach_task()
*/
-static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;
@@ -1086,43 +1238,25 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
* cgroup_file_mode - deduce file mode of a control file
* @cft: the control file in question
*
- * returns cft->mode if ->mode is not 0
- * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
- * returns S_IRUGO if it has only a read handler
- * returns S_IWUSR if it has only a write hander
+ * S_IRUGO for read, S_IWUSR for write (S_IWUGO if CFTYPE_WORLD_WRITABLE is set).
*/
static umode_t cgroup_file_mode(const struct cftype *cft)
{
umode_t mode = 0;
- if (cft->mode)
- return cft->mode;
-
if (cft->read_u64 || cft->read_s64 || cft->seq_show)
mode |= S_IRUGO;
- if (cft->write_u64 || cft->write_s64 || cft->write)
- mode |= S_IWUSR;
+ if (cft->write_u64 || cft->write_s64 || cft->write) {
+ if (cft->flags & CFTYPE_WORLD_WRITABLE)
+ mode |= S_IWUGO;
+ else
+ mode |= S_IWUSR;
+ }
return mode;
}
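As a hedged illustration of the new CFTYPE_WORLD_WRITABLE flag (all names below are hypothetical, not from this patch), a controller wanting an unprivileged-writable knob would now set the flag instead of an explicit ->mode:

	/* hypothetical cftype; example_show/example_write are placeholders */
	static struct cftype example_cfts[] = {
		{
			.name = "example.max",
			.flags = CFTYPE_WORLD_WRITABLE,	/* resolves to S_IRUGO | S_IWUGO */
			.seq_show = example_show,
			.write = example_write,
		},
		{ }	/* terminate */
	};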
-static void cgroup_get(struct cgroup *cgrp)
-{
- WARN_ON_ONCE(cgroup_is_dead(cgrp));
- css_get(&cgrp->self);
-}
-
-static bool cgroup_tryget(struct cgroup *cgrp)
-{
- return css_tryget(&cgrp->self);
-}
-
-static void cgroup_put(struct cgroup *cgrp)
-{
- css_put(&cgrp->self);
-}
-
/**
* cgroup_calc_child_subsys_mask - calculate child_subsys_mask
* @cgrp: the target cgroup
@@ -1263,28 +1397,64 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
}
/**
- * cgroup_clear_dir - remove subsys files in a cgroup directory
- * @cgrp: target cgroup
- * @subsys_mask: mask of the subsystem ids whose files should be removed
+ * css_clear_dir - remove subsys files in a cgroup directory
+ * @css: target css
+ * @cgrp_override: specify if target cgroup is different from css->cgroup
*/
-static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
+static void css_clear_dir(struct cgroup_subsys_state *css,
+ struct cgroup *cgrp_override)
{
- struct cgroup_subsys *ss;
- int i;
+ struct cgroup *cgrp = cgrp_override ?: css->cgroup;
+ struct cftype *cfts;
- for_each_subsys(ss, i) {
- struct cftype *cfts;
+ list_for_each_entry(cfts, &css->ss->cfts, node)
+ cgroup_addrm_files(css, cgrp, cfts, false);
+}
- if (!(subsys_mask & (1 << i)))
- continue;
- list_for_each_entry(cfts, &ss->cfts, node)
- cgroup_addrm_files(cgrp, cfts, false);
+/**
+ * css_populate_dir - create subsys files in a cgroup directory
+ * @css: target css
+ * @cgrp_override: specify if target cgroup is different from css->cgroup
+ *
+ * On failure, no file is added.
+ */
+static int css_populate_dir(struct cgroup_subsys_state *css,
+ struct cgroup *cgrp_override)
+{
+ struct cgroup *cgrp = cgrp_override ?: css->cgroup;
+ struct cftype *cfts, *failed_cfts;
+ int ret;
+
+ if (!css->ss) {
+ if (cgroup_on_dfl(cgrp))
+ cfts = cgroup_dfl_base_files;
+ else
+ cfts = cgroup_legacy_base_files;
+
+ return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
+ }
+
+ list_for_each_entry(cfts, &css->ss->cfts, node) {
+ ret = cgroup_addrm_files(css, cgrp, cfts, true);
+ if (ret < 0) {
+ failed_cfts = cfts;
+ goto err;
+ }
}
+ return 0;
+err:
+ list_for_each_entry(cfts, &css->ss->cfts, node) {
+ if (cfts == failed_cfts)
+ break;
+ cgroup_addrm_files(css, cgrp, cfts, false);
+ }
+ return ret;
}
static int rebind_subsystems(struct cgroup_root *dst_root,
unsigned long ss_mask)
{
+ struct cgroup *dcgrp = &dst_root->cgrp;
struct cgroup_subsys *ss;
unsigned long tmp_ss_mask;
int ssid, i, ret;
@@ -1306,10 +1476,13 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
if (dst_root == &cgrp_dfl_root)
tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;
- ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
- if (ret) {
- if (dst_root != &cgrp_dfl_root)
- return ret;
+ for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
+ struct cgroup *scgrp = &ss->root->cgrp;
+ int tssid;
+
+ ret = css_populate_dir(cgroup_css(scgrp, ss), dcgrp);
+ if (!ret)
+ continue;
/*
* Rebinding back to the default root is not allowed to
@@ -1317,57 +1490,67 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
* be rare. Moving subsystems back and forth even more so.
* Just warn about it and continue.
*/
- if (cgrp_dfl_root_visible) {
- pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
- ret, ss_mask);
- pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
+ if (dst_root == &cgrp_dfl_root) {
+ if (cgrp_dfl_root_visible) {
+ pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
+ ret, ss_mask);
+ pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
+ }
+ continue;
+ }
+
+ for_each_subsys_which(ss, tssid, &tmp_ss_mask) {
+ if (tssid == ssid)
+ break;
+ css_clear_dir(cgroup_css(scgrp, ss), dcgrp);
}
+ return ret;
}
/*
* Nothing can fail from this point on. Remove files for the
* removed subsystems and rebind each subsystem.
*/
- for_each_subsys_which(ss, ssid, &ss_mask)
- cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
-
for_each_subsys_which(ss, ssid, &ss_mask) {
- struct cgroup_root *src_root;
- struct cgroup_subsys_state *css;
+ struct cgroup_root *src_root = ss->root;
+ struct cgroup *scgrp = &src_root->cgrp;
+ struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
struct css_set *cset;
- src_root = ss->root;
- css = cgroup_css(&src_root->cgrp, ss);
+ WARN_ON(!css || cgroup_css(dcgrp, ss));
- WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));
+ css_clear_dir(css, NULL);
- RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
- rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
+ RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+ rcu_assign_pointer(dcgrp->subsys[ssid], css);
ss->root = dst_root;
- css->cgroup = &dst_root->cgrp;
+ css->cgroup = dcgrp;
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist)
list_move_tail(&cset->e_cset_node[ss->id],
- &dst_root->cgrp.e_csets[ss->id]);
- up_write(&css_set_rwsem);
+ &dcgrp->e_csets[ss->id]);
+ spin_unlock_bh(&css_set_lock);
src_root->subsys_mask &= ~(1 << ssid);
- src_root->cgrp.subtree_control &= ~(1 << ssid);
- cgroup_refresh_child_subsys_mask(&src_root->cgrp);
+ scgrp->subtree_control &= ~(1 << ssid);
+ cgroup_refresh_child_subsys_mask(scgrp);
/* default hierarchy doesn't enable controllers by default */
dst_root->subsys_mask |= 1 << ssid;
- if (dst_root != &cgrp_dfl_root) {
- dst_root->cgrp.subtree_control |= 1 << ssid;
- cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
+ if (dst_root == &cgrp_dfl_root) {
+ static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
+ } else {
+ dcgrp->subtree_control |= 1 << ssid;
+ cgroup_refresh_child_subsys_mask(dcgrp);
+ static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
}
if (ss->bind)
ss->bind(css);
}
- kernfs_activate(dst_root->cgrp.kn);
+ kernfs_activate(dcgrp->kn);
return 0;
}
@@ -1497,7 +1680,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
for_each_subsys(ss, i) {
if (strcmp(token, ss->legacy_name))
continue;
- if (ss->disabled)
+ if (!cgroup_ssid_enabled(i))
continue;
/* Mutually exclusive option 'all' + subsystem name */
@@ -1528,7 +1711,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
*/
if (all_ss || (!one_ss && !opts->none && !opts->name))
for_each_subsys(ss, i)
- if (!ss->disabled)
+ if (cgroup_ssid_enabled(i))
opts->subsys_mask |= (1 << i);
/*
@@ -1624,7 +1807,7 @@ static void cgroup_enable_task_cg_lists(void)
{
struct task_struct *p, *g;
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
if (use_task_css_set_links)
goto out_unlock;
@@ -1654,14 +1837,16 @@ static void cgroup_enable_task_cg_lists(void)
if (!(p->flags & PF_EXITING)) {
struct css_set *cset = task_css_set(p);
- list_add(&p->cg_list, &cset->tasks);
+ if (!css_set_populated(cset))
+ css_set_update_populated(cset, true);
+ list_add_tail(&p->cg_list, &cset->tasks);
get_css_set(cset);
}
spin_unlock_irq(&p->sighand->siglock);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
out_unlock:
- up_write(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
}
static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -1671,6 +1856,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->self.sibling);
INIT_LIST_HEAD(&cgrp->self.children);
+ INIT_LIST_HEAD(&cgrp->self.files);
INIT_LIST_HEAD(&cgrp->cset_links);
INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex);
@@ -1708,7 +1894,6 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
{
LIST_HEAD(tmp_links);
struct cgroup *root_cgrp = &root->cgrp;
- struct cftype *base_files;
struct css_set *cset;
int i, ret;
@@ -1725,7 +1910,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
goto out;
/*
- * We're accessing css_set_count without locking css_set_rwsem here,
+ * We're accessing css_set_count without locking css_set_lock here,
* but that's OK - it can only be increased by someone holding
* cgroup_lock, and that's us. The worst that can happen is that we
* have some link structures left over
@@ -1747,12 +1932,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
}
root_cgrp->kn = root->kf_root->kn;
- if (root == &cgrp_dfl_root)
- base_files = cgroup_dfl_base_files;
- else
- base_files = cgroup_legacy_base_files;
-
- ret = cgroup_addrm_files(root_cgrp, base_files, true);
+ ret = css_populate_dir(&root_cgrp->self, NULL);
if (ret)
goto destroy_root;
@@ -1772,10 +1952,13 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
* Link the root cgroup in this hierarchy into all the css_set
* objects.
*/
- down_write(&css_set_rwsem);
- hash_for_each(css_set_table, i, cset, hlist)
+ spin_lock_bh(&css_set_lock);
+ hash_for_each(css_set_table, i, cset, hlist) {
link_css_set(&tmp_links, cset, root_cgrp);
- up_write(&css_set_rwsem);
+ if (css_set_populated(cset))
+ cgroup_update_populated(root_cgrp, true);
+ }
+ spin_unlock_bh(&css_set_lock);
BUG_ON(!list_empty(&root_cgrp->self.children));
BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2008,7 +2191,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
char *path = NULL;
mutex_lock(&cgroup_mutex);
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
@@ -2021,7 +2204,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
path = buf;
}
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
mutex_unlock(&cgroup_mutex);
return path;
}
@@ -2049,6 +2232,49 @@ struct cgroup_taskset {
struct task_struct *cur_task;
};
+#define CGROUP_TASKSET_INIT(tset) (struct cgroup_taskset){ \
+ .src_csets = LIST_HEAD_INIT(tset.src_csets), \
+ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \
+ .csets = &tset.src_csets, \
+}
+
+/**
+ * cgroup_taskset_add - try to add a migration target task to a taskset
+ * @task: target task
+ * @tset: target taskset
+ *
+ * Add @task, which is a migration target, to @tset. This function becomes a
+ * noop if @task doesn't need to be migrated. @task's css_set should have
+ * been added as a migration source and @task->cg_list will be moved from
+ * the css_set's tasks list to the mg_tasks one.
+ */
+static void cgroup_taskset_add(struct task_struct *task,
+ struct cgroup_taskset *tset)
+{
+ struct css_set *cset;
+
+ lockdep_assert_held(&css_set_lock);
+
+ /* @task either already exited or can't exit until the end */
+ if (task->flags & PF_EXITING)
+ return;
+
+ /* leave @task alone if post_fork() hasn't linked it yet */
+ if (list_empty(&task->cg_list))
+ return;
+
+ cset = task_css_set(task);
+ if (!cset->mg_src_cgrp)
+ return;
+
+ list_move_tail(&task->cg_list, &cset->mg_tasks);
+ if (list_empty(&cset->mg_node))
+ list_add_tail(&cset->mg_node, &tset->src_csets);
+ if (list_empty(&cset->mg_dst_cset->mg_node))
+ list_move_tail(&cset->mg_dst_cset->mg_node,
+ &tset->dst_csets);
+}
+
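Read together with cgroup_taskset_migrate() below, the intended usage is roughly the following sketch, compressed from the rewritten cgroup_migrate():

	/* build a taskset under css_set_lock, then hand it off for migration */
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);

	spin_lock_bh(&css_set_lock);
	cgroup_taskset_add(task, &tset);		/* once per target task */
	spin_unlock_bh(&css_set_lock);

	ret = cgroup_taskset_migrate(&tset, dst_cgrp);	/* consumes @tset */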
/**
* cgroup_taskset_first - reset taskset and return the first task
* @tset: taskset of interest
@@ -2096,47 +2322,86 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
}
/**
- * cgroup_task_migrate - move a task from one cgroup to another.
- * @old_cgrp: the cgroup @tsk is being migrated from
- * @tsk: the task being migrated
- * @new_cset: the new css_set @tsk is being attached to
+ * cgroup_taskset_migrate - migrate a taskset to a cgroup
+ * @tset: target taskset
+ * @dst_cgrp: destination cgroup
*
- * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
+ * Migrate tasks in @tset to @dst_cgrp. This function fails iff one of the
+ * ->can_attach callbacks fails and guarantees that either all or none of
+ * the tasks in @tset are migrated. @tset is consumed regardless of
+ * success.
*/
-static void cgroup_task_migrate(struct cgroup *old_cgrp,
- struct task_struct *tsk,
- struct css_set *new_cset)
+static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
+ struct cgroup *dst_cgrp)
{
- struct css_set *old_cset;
-
- lockdep_assert_held(&cgroup_mutex);
- lockdep_assert_held(&css_set_rwsem);
+ struct cgroup_subsys_state *css, *failed_css = NULL;
+ struct task_struct *task, *tmp_task;
+ struct css_set *cset, *tmp_cset;
+ int i, ret;
- /*
- * We are synchronized through threadgroup_lock() against PF_EXITING
- * setting such that we can't race against cgroup_exit() changing the
- * css_set to init_css_set and dropping the old one.
- */
- WARN_ON_ONCE(tsk->flags & PF_EXITING);
- old_cset = task_css_set(tsk);
+ /* methods shouldn't be called if no task is actually migrating */
+ if (list_empty(&tset->src_csets))
+ return 0;
- get_css_set(new_cset);
- rcu_assign_pointer(tsk->cgroups, new_cset);
+ /* check that we can legitimately attach to the cgroup */
+ for_each_e_css(css, i, dst_cgrp) {
+ if (css->ss->can_attach) {
+ ret = css->ss->can_attach(css, tset);
+ if (ret) {
+ failed_css = css;
+ goto out_cancel_attach;
+ }
+ }
+ }
/*
- * Use move_tail so that cgroup_taskset_first() still returns the
- * leader after migration. This works because cgroup_migrate()
- * ensures that the dst_cset of the leader is the first on the
- * tset's dst_csets list.
+ * Now that we're guaranteed success, proceed to move all tasks to
+ * the new cgroup. There are no failure cases after here, so this
+ * is the commit point.
*/
- list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
+ spin_lock_bh(&css_set_lock);
+ list_for_each_entry(cset, &tset->src_csets, mg_node) {
+ list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
+ struct css_set *from_cset = task_css_set(task);
+ struct css_set *to_cset = cset->mg_dst_cset;
+
+ get_css_set(to_cset);
+ css_set_move_task(task, from_cset, to_cset, true);
+ put_css_set_locked(from_cset);
+ }
+ }
+ spin_unlock_bh(&css_set_lock);
/*
- * We just gained a reference on old_cset by taking it from the
- * task. As trading it for new_cset is protected by cgroup_mutex,
- * we're safe to drop it here; it will be freed under RCU.
+ * Migration is committed, all target tasks are now on dst_csets.
+ * Nothing is sensitive to fork() after this point. Notify
+ * controllers that migration is complete.
*/
- put_css_set_locked(old_cset);
+ tset->csets = &tset->dst_csets;
+
+ for_each_e_css(css, i, dst_cgrp)
+ if (css->ss->attach)
+ css->ss->attach(css, tset);
+
+ ret = 0;
+ goto out_release_tset;
+
+out_cancel_attach:
+ for_each_e_css(css, i, dst_cgrp) {
+ if (css == failed_css)
+ break;
+ if (css->ss->cancel_attach)
+ css->ss->cancel_attach(css, tset);
+ }
+out_release_tset:
+ spin_lock_bh(&css_set_lock);
+ list_splice_init(&tset->dst_csets, &tset->src_csets);
+ list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
+ list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
+ list_del_init(&cset->mg_node);
+ }
+ spin_unlock_bh(&css_set_lock);
+ return ret;
}
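For context, controllers observe the taskset through their ->can_attach()/->attach() callbacks. A hedged sketch using the long-standing cgroup_taskset_for_each() helper; the rejection predicate is hypothetical:

	static int example_can_attach(struct cgroup_subsys_state *css,
				      struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		cgroup_taskset_for_each(task, tset)
			if (example_reject(task))	/* hypothetical policy check */
				return -EINVAL;
		return 0;
	}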
/**
@@ -2152,14 +2417,14 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
lockdep_assert_held(&cgroup_mutex);
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cset = NULL;
list_del_init(&cset->mg_preload_node);
put_css_set_locked(cset);
}
- up_write(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
}
/**
@@ -2172,10 +2437,11 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
* @src_cset and add it to @preloaded_csets, which should later be cleaned
* up by cgroup_migrate_finish().
*
- * This function may be called without holding threadgroup_lock even if the
- * target is a process. Threads may be created and destroyed but as long
- * as cgroup_mutex is not dropped, no new css_set can be put into play and
- * the preloaded css_sets are guaranteed to cover all migrations.
+ * This function may be called without holding cgroup_threadgroup_rwsem
+ * even if the target is a process. Threads may be created and destroyed
+ * but as long as cgroup_mutex is not dropped, no new css_set can be put
+ * into play and the preloaded css_sets are guaranteed to cover all
+ * migrations.
*/
static void cgroup_migrate_add_src(struct css_set *src_cset,
struct cgroup *dst_cgrp,
@@ -2184,7 +2450,7 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
struct cgroup *src_cgrp;
lockdep_assert_held(&cgroup_mutex);
- lockdep_assert_held(&css_set_rwsem);
+ lockdep_assert_held(&css_set_lock);
src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
@@ -2273,12 +2539,12 @@ err:
/**
* cgroup_migrate - migrate a process or task to a cgroup
- * @cgrp: the destination cgroup
* @leader: the leader of the process or the task to migrate
* @threadgroup: whether @leader points to the whole process or a single task
+ * @cgrp: the destination cgroup
*
* Migrate a process or task denoted by @leader to @cgrp. If migrating a
- * process, the caller must be holding threadgroup_lock of @leader. The
+ * process, the caller must be holding cgroup_threadgroup_rwsem. The
* caller is also responsible for invoking cgroup_migrate_add_src() and
* cgroup_migrate_prepare_dst() on the targets before invoking this
* function and following up with cgroup_migrate_finish().
@@ -2289,115 +2555,29 @@ err:
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
* actually starting migrating.
*/
-static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
- bool threadgroup)
-{
- struct cgroup_taskset tset = {
- .src_csets = LIST_HEAD_INIT(tset.src_csets),
- .dst_csets = LIST_HEAD_INIT(tset.dst_csets),
- .csets = &tset.src_csets,
- };
- struct cgroup_subsys_state *css, *failed_css = NULL;
- struct css_set *cset, *tmp_cset;
- struct task_struct *task, *tmp_task;
- int i, ret;
+static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
+ struct cgroup *cgrp)
+{
+ struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
+ struct task_struct *task;
/*
* Prevent freeing of tasks while we take a snapshot. Tasks that are
* already PF_EXITING could be freed from underneath us unless we
* take an rcu_read_lock.
*/
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
rcu_read_lock();
task = leader;
do {
- /* @task either already exited or can't exit until the end */
- if (task->flags & PF_EXITING)
- goto next;
-
- /* leave @task alone if post_fork() hasn't linked it yet */
- if (list_empty(&task->cg_list))
- goto next;
-
- cset = task_css_set(task);
- if (!cset->mg_src_cgrp)
- goto next;
-
- /*
- * cgroup_taskset_first() must always return the leader.
- * Take care to avoid disturbing the ordering.
- */
- list_move_tail(&task->cg_list, &cset->mg_tasks);
- if (list_empty(&cset->mg_node))
- list_add_tail(&cset->mg_node, &tset.src_csets);
- if (list_empty(&cset->mg_dst_cset->mg_node))
- list_move_tail(&cset->mg_dst_cset->mg_node,
- &tset.dst_csets);
- next:
+ cgroup_taskset_add(task, &tset);
if (!threadgroup)
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- up_write(&css_set_rwsem);
-
- /* methods shouldn't be called if no task is actually migrating */
- if (list_empty(&tset.src_csets))
- return 0;
-
- /* check that we can legitimately attach to the cgroup */
- for_each_e_css(css, i, cgrp) {
- if (css->ss->can_attach) {
- ret = css->ss->can_attach(css, &tset);
- if (ret) {
- failed_css = css;
- goto out_cancel_attach;
- }
- }
- }
-
- /*
- * Now that we're guaranteed success, proceed to move all tasks to
- * the new cgroup. There are no failure cases after here, so this
- * is the commit point.
- */
- down_write(&css_set_rwsem);
- list_for_each_entry(cset, &tset.src_csets, mg_node) {
- list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
- cgroup_task_migrate(cset->mg_src_cgrp, task,
- cset->mg_dst_cset);
- }
- up_write(&css_set_rwsem);
-
- /*
- * Migration is committed, all target tasks are now on dst_csets.
- * Nothing is sensitive to fork() after this point. Notify
- * controllers that migration is complete.
- */
- tset.csets = &tset.dst_csets;
-
- for_each_e_css(css, i, cgrp)
- if (css->ss->attach)
- css->ss->attach(css, &tset);
-
- ret = 0;
- goto out_release_tset;
+ spin_unlock_bh(&css_set_lock);
-out_cancel_attach:
- for_each_e_css(css, i, cgrp) {
- if (css == failed_css)
- break;
- if (css->ss->cancel_attach)
- css->ss->cancel_attach(css, &tset);
- }
-out_release_tset:
- down_write(&css_set_rwsem);
- list_splice_init(&tset.dst_csets, &tset.src_csets);
- list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
- list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
- list_del_init(&cset->mg_node);
- }
- up_write(&css_set_rwsem);
- return ret;
+ return cgroup_taskset_migrate(&tset, cgrp);
}
/**
@@ -2406,7 +2586,7 @@ out_release_tset:
* @leader: the task or the leader of the threadgroup to be attached
* @threadgroup: attach the whole threadgroup?
*
- * Call holding cgroup_mutex and threadgroup_lock of @leader.
+ * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
*/
static int cgroup_attach_task(struct cgroup *dst_cgrp,
struct task_struct *leader, bool threadgroup)
@@ -2416,7 +2596,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
int ret;
/* look up all src csets */
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
rcu_read_lock();
task = leader;
do {
@@ -2426,12 +2606,12 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
/* prepare dst csets and commit */
ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
if (!ret)
- ret = cgroup_migrate(dst_cgrp, leader, threadgroup);
+ ret = cgroup_migrate(leader, threadgroup, dst_cgrp);
cgroup_migrate_finish(&preloaded_csets);
return ret;
@@ -2459,15 +2639,15 @@ static int cgroup_procs_write_permission(struct task_struct *task,
struct cgroup *cgrp;
struct inode *inode;
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
while (!cgroup_is_descendant(dst_cgrp, cgrp))
cgrp = cgroup_parent(cgrp);
ret = -ENOMEM;
- inode = kernfs_get_inode(sb, cgrp->procs_kn);
+ inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
if (inode) {
ret = inode_permission(inode, MAY_WRITE);
iput(inode);
@@ -2498,14 +2678,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
if (!cgrp)
return -ENODEV;
-retry_find_task:
+ percpu_down_write(&cgroup_threadgroup_rwsem);
rcu_read_lock();
if (pid) {
tsk = find_task_by_vpid(pid);
if (!tsk) {
- rcu_read_unlock();
ret = -ESRCH;
- goto out_unlock_cgroup;
+ goto out_unlock_rcu;
}
} else {
tsk = current;
@@ -2521,37 +2700,23 @@ retry_find_task:
*/
if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
- rcu_read_unlock();
- goto out_unlock_cgroup;
+ goto out_unlock_rcu;
}
get_task_struct(tsk);
rcu_read_unlock();
- threadgroup_lock(tsk);
- if (threadgroup) {
- if (!thread_group_leader(tsk)) {
- /*
- * a race with de_thread from another thread's exec()
- * may strip us of our leadership, if this happens,
- * there is no choice but to throw this task away and
- * try again; this is
- * "double-double-toil-and-trouble-check locking".
- */
- threadgroup_unlock(tsk);
- put_task_struct(tsk);
- goto retry_find_task;
- }
- }
-
ret = cgroup_procs_write_permission(tsk, cgrp, of);
if (!ret)
ret = cgroup_attach_task(cgrp, tsk, threadgroup);
- threadgroup_unlock(tsk);
-
put_task_struct(tsk);
-out_unlock_cgroup:
+ goto out_unlock_threadgroup;
+
+out_unlock_rcu:
+ rcu_read_unlock();
+out_unlock_threadgroup:
+ percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}
@@ -2573,9 +2738,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
if (root == &cgrp_dfl_root)
continue;
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
from_cgrp = task_cgroup_from_root(from, root);
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
retval = cgroup_attach_task(from_cgrp, tsk, false);
if (retval)
@@ -2690,14 +2855,17 @@ static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
LIST_HEAD(preloaded_csets);
+ struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
struct cgroup_subsys_state *css;
struct css_set *src_cset;
int ret;
lockdep_assert_held(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+
/* look up all csses currently attached to @cgrp's subtree */
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
struct cgrp_cset_link *link;
@@ -2709,68 +2877,31 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
cgroup_migrate_add_src(link->cset, cgrp,
&preloaded_csets);
}
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
if (ret)
goto out_finish;
+ spin_lock_bh(&css_set_lock);
list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
- struct task_struct *last_task = NULL, *task;
+ struct task_struct *task, *ntask;
/* src_csets precede dst_csets, break on the first dst_cset */
if (!src_cset->mg_src_cgrp)
break;
- /*
- * All tasks in src_cset need to be migrated to the
- * matching dst_cset. Empty it process by process. We
- * walk tasks but migrate processes. The leader might even
- * belong to a different cset but such src_cset would also
- * be among the target src_csets because the default
- * hierarchy enforces per-process membership.
- */
- while (true) {
- down_read(&css_set_rwsem);
- task = list_first_entry_or_null(&src_cset->tasks,
- struct task_struct, cg_list);
- if (task) {
- task = task->group_leader;
- WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
- get_task_struct(task);
- }
- up_read(&css_set_rwsem);
-
- if (!task)
- break;
-
- /* guard against possible infinite loop */
- if (WARN(last_task == task,
- "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
- goto out_finish;
- last_task = task;
-
- threadgroup_lock(task);
- /* raced against de_thread() from another thread? */
- if (!thread_group_leader(task)) {
- threadgroup_unlock(task);
- put_task_struct(task);
- continue;
- }
-
- ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
-
- threadgroup_unlock(task);
- put_task_struct(task);
-
- if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
- goto out_finish;
- }
+ /* all tasks in src_csets need to be migrated */
+ list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
+ cgroup_taskset_add(task, &tset);
}
+ spin_unlock_bh(&css_set_lock);
+ ret = cgroup_taskset_migrate(&tset, cgrp);
out_finish:
cgroup_migrate_finish(&preloaded_csets);
+ percpu_up_write(&cgroup_threadgroup_rwsem);
return ret;
}
@@ -2797,7 +2928,8 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
if (tok[0] == '\0')
continue;
for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
- if (ss->disabled || strcmp(tok + 1, ss->name))
+ if (!cgroup_ssid_enabled(ssid) ||
+ strcmp(tok + 1, ss->name))
continue;
if (*tok == '+') {
@@ -2921,7 +3053,8 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
ret = create_css(child, ss,
cgrp->subtree_control & (1 << ssid));
else
- ret = cgroup_populate_dir(child, 1 << ssid);
+ ret = css_populate_dir(cgroup_css(child, ss),
+ NULL);
if (ret)
goto err_undo_css;
}
@@ -2954,7 +3087,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
if (css_disable & (1 << ssid)) {
kill_css(css);
} else {
- cgroup_clear_dir(child, 1 << ssid);
+ css_clear_dir(css, NULL);
if (ss->css_reset)
ss->css_reset(css);
}
@@ -3002,15 +3135,16 @@ err_undo_css:
if (css_enable & (1 << ssid))
kill_css(css);
else
- cgroup_clear_dir(child, 1 << ssid);
+ css_clear_dir(css, NULL);
}
}
goto out_unlock;
}
-static int cgroup_populated_show(struct seq_file *seq, void *v)
+static int cgroup_events_show(struct seq_file *seq, void *v)
{
- seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
+ seq_printf(seq, "populated %d\n",
+ cgroup_is_populated(seq_css(seq)->cgroup));
return 0;
}
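With this rename the interface file reports key/value pairs; reading "cgroup.events" on a non-root cgroup yields a line such as:

	populated 1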
@@ -3153,7 +3287,8 @@ static int cgroup_kn_set_ugid(struct kernfs_node *kn)
return kernfs_setattr(kn, &iattr);
}
-static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
+static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
+ struct cftype *cft)
{
char name[CGROUP_FILE_NAME_MAX];
struct kernfs_node *kn;
@@ -3175,33 +3310,38 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
return ret;
}
- if (cft->write == cgroup_procs_write)
- cgrp->procs_kn = kn;
- else if (cft->seq_show == cgroup_populated_show)
- cgrp->populated_kn = kn;
+ if (cft->file_offset) {
+ struct cgroup_file *cfile = (void *)css + cft->file_offset;
+
+ kernfs_get(kn);
+ cfile->kn = kn;
+ list_add(&cfile->node, &css->files);
+ }
+
return 0;
}
/**
* cgroup_addrm_files - add or remove files to a cgroup directory
- * @cgrp: the target cgroup
+ * @css: the target css
+ * @cgrp: the target cgroup (usually css->cgroup)
* @cfts: array of cftypes to be added
* @is_add: whether to add or remove
*
* Depending on @is_add, add or remove files defined by @cfts on @cgrp.
- * For removals, this function never fails. If addition fails, this
- * function doesn't remove files already added. The caller is responsible
- * for cleaning up.
+ * For removals, this function never fails.
*/
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+static int cgroup_addrm_files(struct cgroup_subsys_state *css,
+ struct cgroup *cgrp, struct cftype cfts[],
bool is_add)
{
- struct cftype *cft;
+ struct cftype *cft, *cft_end = NULL;
int ret;
lockdep_assert_held(&cgroup_mutex);
- for (cft = cfts; cft->name[0] != '\0'; cft++) {
+restart:
+ for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
/* does cft->flags tell us to skip this file on @cgrp? */
if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
continue;
@@ -3213,11 +3353,13 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
continue;
if (is_add) {
- ret = cgroup_add_file(cgrp, cft);
+ ret = cgroup_add_file(css, cgrp, cft);
if (ret) {
pr_warn("%s: failed to add %s, err=%d\n",
__func__, cft->name, ret);
- return ret;
+ cft_end = cft;
+ is_add = false;
+ goto restart;
}
} else {
cgroup_rm_file(cgrp, cft);
@@ -3243,7 +3385,7 @@ static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
if (cgroup_is_dead(cgrp))
continue;
- ret = cgroup_addrm_files(cgrp, cfts, is_add);
+ ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
if (ret)
break;
}
@@ -3355,7 +3497,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
int ret;
- if (ss->disabled)
+ if (!cgroup_ssid_enabled(ss->id))
return 0;
if (!cfts || cfts[0].name[0] == '\0')
@@ -3405,17 +3547,8 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
- /*
- * If legacy_flies_on_dfl, we want to show the legacy files on the
- * dfl hierarchy but iff the target subsystem hasn't been updated
- * for the dfl hierarchy yet.
- */
- if (!cgroup_legacy_files_on_dfl ||
- ss->dfl_cftypes != ss->legacy_cftypes) {
- for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
- cft->flags |= __CFTYPE_NOT_ON_DFL;
- }
-
+ for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+ cft->flags |= __CFTYPE_NOT_ON_DFL;
return cgroup_add_cftypes(ss, cfts);
}
@@ -3430,10 +3563,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
int count = 0;
struct cgrp_cset_link *link;
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
count += atomic_read(&link->cset->refcount);
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
return count;
}
@@ -3665,22 +3798,25 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
}
/**
- * css_advance_task_iter - advance a task itererator to the next css_set
+ * css_task_iter_advance_css_set - advance a task iterator to the next css_set
* @it: the iterator to advance
*
* Advance @it to the next css_set to walk.
*/
-static void css_advance_task_iter(struct css_task_iter *it)
+static void css_task_iter_advance_css_set(struct css_task_iter *it)
{
struct list_head *l = it->cset_pos;
struct cgrp_cset_link *link;
struct css_set *cset;
+ lockdep_assert_held(&css_set_lock);
+
/* Advance to the next non-empty css_set */
do {
l = l->next;
if (l == it->cset_head) {
it->cset_pos = NULL;
+ it->task_pos = NULL;
return;
}
@@ -3691,7 +3827,7 @@ static void css_advance_task_iter(struct css_task_iter *it)
link = list_entry(l, struct cgrp_cset_link, cset_link);
cset = link->cset;
}
- } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));
+ } while (!css_set_populated(cset));
it->cset_pos = l;
@@ -3702,6 +3838,52 @@ static void css_advance_task_iter(struct css_task_iter *it)
it->tasks_head = &cset->tasks;
it->mg_tasks_head = &cset->mg_tasks;
+
+ /*
+ * We don't keep css_sets locked across iteration steps and thus
+ * need to take steps to ensure that iteration can be resumed after
+ * the lock is re-acquired. Iteration is performed at two levels -
+ * css_sets and tasks in them.
+ *
+ * Once created, a css_set never leaves its cgroup lists, so a
+ * pinned css_set is guaranteed to stay put and we can resume
+ * iteration afterwards.
+ *
+ * Tasks may leave @cset across iteration steps. This is resolved
+ * by registering each iterator with the css_set currently being
+ * walked and making css_set_move_task() advance iterators whose
+ * next task is leaving.
+ */
+ if (it->cur_cset) {
+ list_del(&it->iters_node);
+ put_css_set_locked(it->cur_cset);
+ }
+ get_css_set(cset);
+ it->cur_cset = cset;
+ list_add(&it->iters_node, &cset->task_iters);
+}
+
+static void css_task_iter_advance(struct css_task_iter *it)
+{
+ struct list_head *l = it->task_pos;
+
+ lockdep_assert_held(&css_set_lock);
+ WARN_ON_ONCE(!l);
+
+ /*
+ * Advance iterator to find next entry. cset->tasks is consumed
+ * first and then ->mg_tasks. After ->mg_tasks, we move onto the
+ * next cset.
+ */
+ l = l->next;
+
+ if (l == it->tasks_head)
+ l = it->mg_tasks_head->next;
+
+ if (l == it->mg_tasks_head)
+ css_task_iter_advance_css_set(it);
+ else
+ it->task_pos = l;
}
/**
@@ -3713,19 +3895,16 @@ static void css_advance_task_iter(struct css_task_iter *it)
* css_task_iter_next() to walk through the tasks until the function
* returns NULL. On completion of iteration, css_task_iter_end() must be
* called.
- *
- * Note that this function acquires a lock which is released when the
- * iteration finishes. The caller can't sleep while iteration is in
- * progress.
*/
void css_task_iter_start(struct cgroup_subsys_state *css,
struct css_task_iter *it)
- __acquires(css_set_rwsem)
{
/* no one should try to iterate before mounting cgroups */
WARN_ON_ONCE(!use_task_css_set_links);
- down_read(&css_set_rwsem);
+ memset(it, 0, sizeof(*it));
+
+ spin_lock_bh(&css_set_lock);
it->ss = css->ss;
@@ -3736,7 +3915,9 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
it->cset_head = it->cset_pos;
- css_advance_task_iter(it);
+ css_task_iter_advance_css_set(it);
+
+ spin_unlock_bh(&css_set_lock);
}
/**
@@ -3749,30 +3930,23 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
*/
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
- struct task_struct *res;
- struct list_head *l = it->task_pos;
+ if (it->cur_task) {
+ put_task_struct(it->cur_task);
+ it->cur_task = NULL;
+ }
- /* If the iterator cg is NULL, we have no tasks */
- if (!it->cset_pos)
- return NULL;
- res = list_entry(l, struct task_struct, cg_list);
+ spin_lock_bh(&css_set_lock);
- /*
- * Advance iterator to find next entry. cset->tasks is consumed
- * first and then ->mg_tasks. After ->mg_tasks, we move onto the
- * next cset.
- */
- l = l->next;
+ if (it->task_pos) {
+ it->cur_task = list_entry(it->task_pos, struct task_struct,
+ cg_list);
+ get_task_struct(it->cur_task);
+ css_task_iter_advance(it);
+ }
- if (l == it->tasks_head)
- l = it->mg_tasks_head->next;
+ spin_unlock_bh(&css_set_lock);
- if (l == it->mg_tasks_head)
- css_advance_task_iter(it);
- else
- it->task_pos = l;
-
- return res;
+ return it->cur_task;
}
/**
@@ -3782,9 +3956,16 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
* Finish task iteration started by css_task_iter_start().
*/
void css_task_iter_end(struct css_task_iter *it)
- __releases(css_set_rwsem)
{
- up_read(&css_set_rwsem);
+ if (it->cur_cset) {
+ spin_lock_bh(&css_set_lock);
+ list_del(&it->iters_node);
+ put_css_set_locked(it->cur_cset);
+ spin_unlock_bh(&css_set_lock);
+ }
+
+ if (it->cur_task)
+ put_task_struct(it->cur_task);
}
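Because css_set_lock is now taken and released inside each step, callers may block between iterations. A minimal usage sketch (the same start/next/end pattern cgroup_transfer_tasks() relies on):

	struct css_task_iter it;
	struct task_struct *task;
	int nr = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		nr++;			/* blocking work is allowed here now */
	css_task_iter_end(&it);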
/**
@@ -3809,10 +3990,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
mutex_lock(&cgroup_mutex);
/* all tasks in @from are being moved, all csets are source */
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
if (ret)
@@ -3830,7 +4011,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
css_task_iter_end(&it);
if (task) {
- ret = cgroup_migrate(to, task, false);
+ ret = cgroup_migrate(task, false, to);
put_task_struct(task);
}
} while (task && !ret);
@@ -4327,13 +4508,13 @@ static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
static struct cftype cgroup_dfl_base_files[] = {
{
.name = "cgroup.procs",
+ .file_offset = offsetof(struct cgroup, procs_file),
.seq_start = cgroup_pidlist_start,
.seq_next = cgroup_pidlist_next,
.seq_stop = cgroup_pidlist_stop,
.seq_show = cgroup_pidlist_show,
.private = CGROUP_FILE_PROCS,
.write = cgroup_procs_write,
- .mode = S_IRUGO | S_IWUSR,
},
{
.name = "cgroup.controllers",
@@ -4351,9 +4532,10 @@ static struct cftype cgroup_dfl_base_files[] = {
.write = cgroup_subtree_control_write,
},
{
- .name = "cgroup.populated",
+ .name = "cgroup.events",
.flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cgroup_populated_show,
+ .file_offset = offsetof(struct cgroup, events_file),
+ .seq_show = cgroup_events_show,
},
{ } /* terminate */
};
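A hedged sketch of how the new ->file_offset hookup generalizes to a controller-owned file (all names hypothetical). Since cgroup_add_file() above adds the offset to the css pointer, the embedding struct must start with the css for a plain offsetof() to be correct:

	struct example_css {
		struct cgroup_subsys_state css;		/* must come first */
		struct cgroup_file notify_file;		/* handle for cgroup_file_notify() */
	};

	static struct cftype example_dfl_files[] = {
		{
			.name = "example.events",
			.file_offset = offsetof(struct example_css, notify_file),
			.seq_show = example_events_show,	/* hypothetical */
		},
		{ }	/* terminate */
	};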
@@ -4368,7 +4550,6 @@ static struct cftype cgroup_legacy_base_files[] = {
.seq_show = cgroup_pidlist_show,
.private = CGROUP_FILE_PROCS,
.write = cgroup_procs_write,
- .mode = S_IRUGO | S_IWUSR,
},
{
.name = "cgroup.clone_children",
@@ -4388,7 +4569,6 @@ static struct cftype cgroup_legacy_base_files[] = {
.seq_show = cgroup_pidlist_show,
.private = CGROUP_FILE_TASKS,
.write = cgroup_tasks_write,
- .mode = S_IRUGO | S_IWUSR,
},
{
.name = "notify_on_release",
@@ -4405,37 +4585,6 @@ static struct cftype cgroup_legacy_base_files[] = {
{ } /* terminate */
};
-/**
- * cgroup_populate_dir - create subsys files in a cgroup directory
- * @cgrp: target cgroup
- * @subsys_mask: mask of the subsystem ids whose files should be added
- *
- * On failure, no file is added.
- */
-static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
-{
- struct cgroup_subsys *ss;
- int i, ret = 0;
-
- /* process cftsets of each subsystem */
- for_each_subsys(ss, i) {
- struct cftype *cfts;
-
- if (!(subsys_mask & (1 << i)))
- continue;
-
- list_for_each_entry(cfts, &ss->cfts, node) {
- ret = cgroup_addrm_files(cgrp, cfts, true);
- if (ret < 0)
- goto err;
- }
- }
- return 0;
-err:
- cgroup_clear_dir(cgrp, subsys_mask);
- return ret;
-}
-
/*
* css destruction is four-stage process.
*
@@ -4464,9 +4613,13 @@ static void css_free_work_fn(struct work_struct *work)
container_of(work, struct cgroup_subsys_state, destroy_work);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
+ struct cgroup_file *cfile;
percpu_ref_exit(&css->refcnt);
+ list_for_each_entry(cfile, &css->files, node)
+ kernfs_put(cfile->kn);
+
if (ss) {
/* css free path */
int id = css->id;
@@ -4571,6 +4724,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
css->ss = ss;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
+ INIT_LIST_HEAD(&css->files);
css->serial_nr = css_serial_nr_next++;
if (cgroup_parent(cgrp)) {
@@ -4653,7 +4807,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
css->id = err;
if (visible) {
- err = cgroup_populate_dir(cgrp, 1 << ss->id);
+ err = css_populate_dir(css, NULL);
if (err)
goto err_free_id;
}
@@ -4679,7 +4833,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
err_list_del:
list_del_rcu(&css->sibling);
- cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+ css_clear_dir(css, NULL);
err_free_id:
cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
@@ -4696,7 +4850,6 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
struct cgroup_root *root;
struct cgroup_subsys *ss;
struct kernfs_node *kn;
- struct cftype *base_files;
int ssid, ret;
/* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
@@ -4772,12 +4925,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (ret)
goto out_destroy;
- if (cgroup_on_dfl(cgrp))
- base_files = cgroup_dfl_base_files;
- else
- base_files = cgroup_legacy_base_files;
-
- ret = cgroup_addrm_files(cgrp, base_files, true);
+ ret = css_populate_dir(&cgrp->self, NULL);
if (ret)
goto out_destroy;
@@ -4864,7 +5012,7 @@ static void kill_css(struct cgroup_subsys_state *css)
* This must happen before css is disassociated with its cgroup.
* See seq_css() for details.
*/
- cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+ css_clear_dir(css, NULL);
/*
* Killing would put the base ref, but we need to keep it alive
@@ -4913,19 +5061,15 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
struct cgroup_subsys_state *css;
- bool empty;
int ssid;
lockdep_assert_held(&cgroup_mutex);
/*
- * css_set_rwsem synchronizes access to ->cset_links and prevents
- * @cgrp from being removed while put_css_set() is in progress.
+ * Only migration can raise populated from zero and we're already
+ * holding cgroup_mutex.
*/
- down_read(&css_set_rwsem);
- empty = list_empty(&cgrp->cset_links);
- up_read(&css_set_rwsem);
- if (!empty)
+ if (cgroup_is_populated(cgrp))
return -EBUSY;
/*
@@ -5023,6 +5167,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
have_fork_callback |= (bool)ss->fork << ss->id;
have_exit_callback |= (bool)ss->exit << ss->id;
+ have_free_callback |= (bool)ss->free << ss->id;
have_canfork_callback |= (bool)ss->can_fork << ss->id;
/* At system boot, before all subsystems have been
@@ -5071,6 +5216,8 @@ int __init cgroup_init_early(void)
return 0;
}
+static unsigned long cgroup_disable_mask __initdata;
+
/**
* cgroup_init - cgroup initialization
*
@@ -5081,8 +5228,9 @@ int __init cgroup_init(void)
{
struct cgroup_subsys *ss;
unsigned long key;
- int ssid, err;
+ int ssid;
+ BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
@@ -5116,14 +5264,15 @@ int __init cgroup_init(void)
* disabled flag and cftype registration needs kmalloc,
* both of which aren't available during early_init.
*/
- if (ss->disabled)
+ if (cgroup_disable_mask & (1 << ssid)) {
+ static_branch_disable(cgroup_subsys_enabled_key[ssid]);
+ printk(KERN_INFO "Disabling %s control group subsystem\n",
+ ss->name);
continue;
+ }
cgrp_dfl_root.subsys_mask |= 1 << ss->id;
- if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
- ss->dfl_cftypes = ss->legacy_cftypes;
-
if (!ss->dfl_cftypes)
cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
@@ -5138,17 +5287,10 @@ int __init cgroup_init(void)
ss->bind(init_css_set.subsys[ssid]);
}
- err = sysfs_create_mount_point(fs_kobj, "cgroup");
- if (err)
- return err;
+ WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
+ WARN_ON(register_filesystem(&cgroup_fs_type));
+ WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
- err = register_filesystem(&cgroup_fs_type);
- if (err < 0) {
- sysfs_remove_mount_point(fs_kobj, "cgroup");
- return err;
- }
-
- proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
return 0;
}
@@ -5195,7 +5337,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
goto out;
mutex_lock(&cgroup_mutex);
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
for_each_root(root) {
struct cgroup_subsys *ss;
@@ -5215,19 +5357,39 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m, "%sname=%s", count ? "," : "",
root->name);
seq_putc(m, ':');
+
cgrp = task_cgroup_from_root(tsk, root);
- path = cgroup_path(cgrp, buf, PATH_MAX);
- if (!path) {
- retval = -ENAMETOOLONG;
- goto out_unlock;
+
+ /*
+ * On traditional hierarchies, all zombie tasks show up as
+ * belonging to the root cgroup. On the default hierarchy,
+ * while a zombie doesn't show up in "cgroup.procs" and
+ * thus can't be migrated, its /proc/PID/cgroup keeps
+ * reporting the cgroup it belonged to before exiting. If
+ * the cgroup is removed before the zombie is reaped,
+ * " (deleted)" is appended to the cgroup path.
+ */
+ if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
+ path = cgroup_path(cgrp, buf, PATH_MAX);
+ if (!path) {
+ retval = -ENAMETOOLONG;
+ goto out_unlock;
+ }
+ } else {
+ path = "/";
}
+
seq_puts(m, path);
- seq_putc(m, '\n');
+
+ if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
+ seq_puts(m, " (deleted)\n");
+ else
+ seq_putc(m, '\n');
}
retval = 0;
out_unlock:
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
mutex_unlock(&cgroup_mutex);
kfree(buf);
out:
@@ -5251,7 +5413,8 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
for_each_subsys(ss, i)
seq_printf(m, "%s\t%d\t%d\t%d\n",
ss->legacy_name, ss->root->hierarchy_id,
- atomic_read(&ss->root->nr_cgrps), !ss->disabled);
+ atomic_read(&ss->root->nr_cgrps),
+ cgroup_ssid_enabled(i));
mutex_unlock(&cgroup_mutex);
return 0;
@@ -5372,7 +5535,7 @@ void cgroup_post_fork(struct task_struct *child,
* @child during its iteration.
*
* If we won the race, @child is associated with %current's
- * css_set. Grabbing css_set_rwsem guarantees both that the
+ * css_set. Grabbing css_set_lock guarantees both that the
* association is stable, and, on completion of the parent's
* migration, @child is visible in the source of migration or
* already in the destination cgroup. This guarantee is necessary
@@ -5387,14 +5550,13 @@ void cgroup_post_fork(struct task_struct *child,
if (use_task_css_set_links) {
struct css_set *cset;
- down_write(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
cset = task_css_set(current);
if (list_empty(&child->cg_list)) {
- rcu_assign_pointer(child->cgroups, cset);
- list_add(&child->cg_list, &cset->tasks);
get_css_set(cset);
+ css_set_move_task(child, NULL, cset, false);
}
- up_write(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
}
/*
@@ -5429,39 +5591,42 @@ void cgroup_exit(struct task_struct *tsk)
{
struct cgroup_subsys *ss;
struct css_set *cset;
- bool put_cset = false;
int i;
/*
 * Unlink @tsk from its css_set. As the migration path can't race
- * with us, we can check cg_list without grabbing css_set_rwsem.
+ * with us, we can check css_set and cg_list without synchronization.
*/
+ cset = task_css_set(tsk);
+
if (!list_empty(&tsk->cg_list)) {
- down_write(&css_set_rwsem);
- list_del_init(&tsk->cg_list);
- up_write(&css_set_rwsem);
- put_cset = true;
+ spin_lock_bh(&css_set_lock);
+ css_set_move_task(tsk, cset, NULL, false);
+ spin_unlock_bh(&css_set_lock);
+ } else {
+ get_css_set(cset);
}
- /* Reassign the task to the init_css_set. */
- cset = task_css_set(tsk);
- RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
-
/* see cgroup_post_fork() for details */
- for_each_subsys_which(ss, i, &have_exit_callback) {
- struct cgroup_subsys_state *old_css = cset->subsys[i];
- struct cgroup_subsys_state *css = task_css(tsk, i);
+ for_each_subsys_which(ss, i, &have_exit_callback)
+ ss->exit(tsk);
+}
- ss->exit(css, old_css, tsk);
- }
+void cgroup_free(struct task_struct *task)
+{
+ struct css_set *cset = task_css_set(task);
+ struct cgroup_subsys *ss;
+ int ssid;
- if (put_cset)
- put_css_set(cset);
+ for_each_subsys_which(ss, ssid, &have_free_callback)
+ ss->free(task);
+
+ put_css_set(cset);
}
static void check_for_release(struct cgroup *cgrp)
{
- if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) &&
+ if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
!css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
schedule_work(&cgrp->release_agent_work);
}
@@ -5540,25 +5705,13 @@ static int __init cgroup_disable(char *str)
if (strcmp(token, ss->name) &&
strcmp(token, ss->legacy_name))
continue;
-
- ss->disabled = 1;
- printk(KERN_INFO "Disabling %s control group subsystem\n",
- ss->name);
- break;
+ cgroup_disable_mask |= 1 << i;
}
}
return 1;
}
__setup("cgroup_disable=", cgroup_disable);
-static int __init cgroup_set_legacy_files_on_dfl(char *str)
-{
- printk("cgroup: using legacy files on the default hierarchy\n");
- cgroup_legacy_files_on_dfl = true;
- return 0;
-}
-__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
-
/**
* css_tryget_online_from_dir - get corresponding css from a cgroup dentry
* @dentry: directory dentry of interest
@@ -5662,7 +5815,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
if (!name_buf)
return -ENOMEM;
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
rcu_read_lock();
cset = rcu_dereference(current->cgroups);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -5673,7 +5826,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
c->root->hierarchy_id, name_buf);
}
rcu_read_unlock();
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
kfree(name_buf);
return 0;
}
@@ -5684,7 +5837,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct cgroup_subsys_state *css = seq_css(seq);
struct cgrp_cset_link *link;
- down_read(&css_set_rwsem);
+ spin_lock_bh(&css_set_lock);
list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
@@ -5707,13 +5860,13 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
overflow:
seq_puts(seq, " ...\n");
}
- up_read(&css_set_rwsem);
+ spin_unlock_bh(&css_set_lock);
return 0;
}
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
- return (!cgroup_has_tasks(css->cgroup) &&
+ return (!cgroup_is_populated(css->cgroup) &&
!css_has_online_children(&css->cgroup->self));
}
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index 806cd7693ac8..cdd8df4e991c 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -266,11 +266,9 @@ static void pids_fork(struct task_struct *task, void *priv)
css_put(old_css);
}
-static void pids_exit(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task)
+static void pids_free(struct task_struct *task)
{
- struct pids_cgroup *pids = css_pids(old_css);
+ struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
pids_uncharge(pids, 1);
}
@@ -349,7 +347,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
.fork = pids_fork,
- .exit = pids_exit,
+ .free = pids_free,
.legacy_cftypes = pids_files,
.dfl_cftypes = pids_files,
};
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9ef59a37c190..10ae73611d80 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -473,7 +473,8 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
/* On legacy hierarchy, we must be a subset of our parent cpuset. */
ret = -EACCES;
- if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
+ if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ !is_cpuset_subset(trial, par))
goto out;
/*
@@ -497,7 +498,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
* be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
- if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
+ if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
if (!cpumask_empty(cur->cpus_allowed) &&
cpumask_empty(trial->cpus_allowed))
goto out;
@@ -879,7 +880,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some CPUs.
*/
- if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ cpumask_empty(new_cpus))
cpumask_copy(new_cpus, parent->effective_cpus);
/* Skip the whole subtree if the cpumask remains the same. */
@@ -896,7 +898,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
cpumask_copy(cp->effective_cpus, new_cpus);
spin_unlock_irq(&callback_lock);
- WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+ WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
update_tasks_cpumask(cp);
@@ -1135,7 +1137,8 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some MEMs.
*/
- if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ nodes_empty(*new_mems))
*new_mems = parent->effective_mems;
/* Skip the whole subtree if the nodemask remains the same. */
@@ -1152,7 +1155,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
cp->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
- WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+ WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
update_tasks_nodemask(cp);
@@ -1440,7 +1443,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
/* allow moving tasks into an empty cpuset if on default hierarchy */
ret = -ENOSPC;
- if (!cgroup_on_dfl(css->cgroup) &&
+ if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;
@@ -1484,9 +1487,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
{
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
- struct mm_struct *mm;
struct task_struct *task;
- struct task_struct *leader = cgroup_taskset_first(tset);
+ struct task_struct *leader;
struct cpuset *cs = css_cs(css);
struct cpuset *oldcs = cpuset_attach_old_cs;
@@ -1512,26 +1514,30 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
}
/*
- * Change mm, possibly for multiple threads in a threadgroup. This is
- * expensive and may sleep.
+ * Change mm for all threadgroup leaders. This is expensive and may
+ * sleep, and should eventually be moved outside the migration path proper.
*/
cpuset_attach_nodemask_to = cs->effective_mems;
- mm = get_task_mm(leader);
- if (mm) {
- mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
-
- /*
- * old_mems_allowed is the same with mems_allowed here, except
- * if this task is being moved automatically due to hotplug.
- * In that case @mems_allowed has been updated and is empty,
- * so @old_mems_allowed is the right nodesets that we migrate
- * mm from.
- */
- if (is_memory_migrate(cs)) {
- cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
- &cpuset_attach_nodemask_to);
+ cgroup_taskset_for_each_leader(leader, tset) {
+ struct mm_struct *mm = get_task_mm(leader);
+
+ if (mm) {
+ mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
+
+ /*
+ * old_mems_allowed is the same as mems_allowed
+ * here, except if this task is being moved
+ * automatically due to hotplug. In that case
+ * @mems_allowed has been updated and is empty, so
+ * @old_mems_allowed is the right nodeset that we
+ * migrate mm from.
+ */
+ if (is_memory_migrate(cs)) {
+ cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
+ &cpuset_attach_nodemask_to);
+ }
+ mmput(mm);
}
- mmput(mm);
}
cs->old_mems_allowed = cpuset_attach_nodemask_to;
@@ -1594,9 +1600,6 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
case FILE_MEMORY_PRESSURE_ENABLED:
cpuset_memory_pressure_enabled = !!val;
break;
- case FILE_MEMORY_PRESSURE:
- retval = -EACCES;
- break;
case FILE_SPREAD_PAGE:
retval = update_flag(CS_SPREAD_PAGE, cs, val);
break;
@@ -1863,9 +1866,6 @@ static struct cftype files[] = {
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_MEMORY_PRESSURE,
- .mode = S_IRUGO,
},
{
@@ -1952,7 +1952,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
spin_lock_irq(&callback_lock);
- if (cgroup_on_dfl(cs->css.cgroup)) {
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
@@ -2029,7 +2029,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
- if (cgroup_on_dfl(root_css->cgroup)) {
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
top_cpuset.mems_allowed = node_possible_map;
} else {
@@ -2210,7 +2210,7 @@ retry:
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
mems_updated = !nodes_equal(new_mems, cs->effective_mems);
- if (cgroup_on_dfl(cs->css.cgroup))
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
else
@@ -2241,7 +2241,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated, mems_updated;
- bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup);
+ bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
mutex_lock(&cpuset_mutex);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 39db20c6248e..1a734e0adfa7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9460,17 +9460,9 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
task_function_call(task, __perf_cgroup_move, task);
}
-static void perf_cgroup_exit(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task)
-{
- task_function_call(task, __perf_cgroup_move, task);
-}
-
struct cgroup_subsys perf_event_cgrp_subsys = {
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
- .exit = perf_cgroup_exit,
.attach = perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/fork.c b/kernel/fork.c
index a30fae45b486..f97f2c449f5c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -251,6 +251,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
+ cgroup_free(tsk);
task_numa_free(tsk);
security_task_free(tsk);
exit_creds(tsk);
@@ -1150,10 +1151,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
tty_audit_fork(sig);
sched_autogroup_fork(sig);
-#ifdef CONFIG_CGROUPS
- init_rwsem(&sig->group_rwsem);
-#endif
-
sig->oom_score_adj = current->signal->oom_score_adj;
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index bd62f5cda746..6528a79d998d 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/errno.h>
#include <keys/system_keyring.h>
#include <crypto/public_key.h>
#include "module-internal.h"
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8f0324ef72ab..b16f35487b67 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -517,6 +517,7 @@ int check_syslog_permissions(int type, int source)
ok:
return security_syslog(type);
}
+EXPORT_SYMBOL_GPL(check_syslog_permissions);
static void append_char(char **pp, char *e, char c)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aa5973220ad2..4d568ac9319e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8244,13 +8244,6 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
sched_move_task(task);
}
-static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task)
-{
- sched_move_task(task);
-}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
@@ -8582,7 +8575,6 @@ struct cgroup_subsys cpu_cgrp_subsys = {
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
- .exit = cpu_cgroup_exit,
.legacy_cftypes = cpu_files,
.early_init = 1,
};
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 8cbc3db671df..26a54461bf59 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -444,6 +444,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
*ut = p->utime;
*st = p->stime;
}
+EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
@@ -652,6 +653,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
task_cputime(p, &cputime.utime, &cputime.stime);
cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
+EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bcb14cafe007..c579dbab2e36 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3199,6 +3199,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
u32 hash = wqattrs_hash(attrs);
struct worker_pool *pool;
int node;
+ int target_node = NUMA_NO_NODE;
lockdep_assert_held(&wq_pool_mutex);
@@ -3210,13 +3211,25 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
}
}
+ /* if cpumask is contained inside a NUMA node, we belong to that node */
+ if (wq_numa_enabled) {
+ for_each_node(node) {
+ if (cpumask_subset(attrs->cpumask,
+ wq_numa_possible_cpumask[node])) {
+ target_node = node;
+ break;
+ }
+ }
+ }
+
/* nope, create a new one */
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
if (!pool || init_worker_pool(pool) < 0)
goto fail;
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
+ pool->node = target_node;
/*
* no_numa isn't a worker_pool attribute, always clear it. See
@@ -3224,17 +3237,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
*/
pool->attrs->no_numa = false;
- /* if cpumask is contained inside a NUMA node, we belong to that node */
- if (wq_numa_enabled) {
- for_each_node(node) {
- if (cpumask_subset(pool->attrs->cpumask,
- wq_numa_possible_cpumask[node])) {
- pool->node = node;
- break;
- }
- }
- }
-
if (worker_pool_assign_id(pool) < 0)
goto fail;
diff --git a/lib/digsig.c b/lib/digsig.c
index ae05ea393fc8..07be6c1ef4e2 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -79,12 +79,13 @@ static int digsig_verify_rsa(struct key *key,
unsigned char *out1 = NULL;
const char *m;
MPI in = NULL, res = NULL, pkey[2];
- uint8_t *p, *datap, *endp;
- struct user_key_payload *ukp;
+ uint8_t *p, *datap;
+ const uint8_t *endp;
+ const struct user_key_payload *ukp;
struct pubkey_hdr *pkh;
down_read(&key->sem);
- ukp = key->payload.data;
+ ukp = user_key_payload(key);
if (ukp->datalen < sizeof(*pkh))
goto err1;
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index b1c93e94ca7a..858dc1aae478 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -11,10 +11,6 @@
#include <linux/dma-mapping.h>
#include <linux/hash.h>
-#ifndef DMA_ERROR_CODE
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-#endif
-
static unsigned long iommu_large_alloc = 15;
static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
@@ -123,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
/* Sanity check */
if (unlikely(npages == 0)) {
WARN_ON_ONCE(1);
- return DMA_ERROR_CODE;
+ return IOMMU_ERROR_CODE;
}
if (largealloc) {
@@ -206,7 +202,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
goto again;
} else {
/* give up */
- n = DMA_ERROR_CODE;
+ n = IOMMU_ERROR_CODE;
goto bail;
}
}
@@ -259,7 +255,7 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
unsigned long flags;
unsigned long shift = iommu->table_shift;
- if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
+ if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
entry = (dma_addr - iommu->table_map_base) >> shift;
pool = get_pool(iommu, entry);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 38765d8e7e18..bc502e590366 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -435,7 +435,7 @@ struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
memcg = page->mem_cgroup;
- if (!memcg || !cgroup_on_dfl(memcg->css.cgroup))
+ if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
memcg = root_mem_cgroup;
rcu_read_unlock();
@@ -2891,7 +2891,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
* of course permitted.
*/
mutex_lock(&memcg_create_mutex);
- if (cgroup_has_tasks(memcg->css.cgroup) ||
+ if (cgroup_is_populated(memcg->css.cgroup) ||
(memcg->use_hierarchy && memcg_has_children(memcg)))
err = -EBUSY;
mutex_unlock(&memcg_create_mutex);
@@ -4030,8 +4030,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
{
.name = "cgroup.event_control", /* XXX: for compat */
.write = memcg_write_event_control,
- .flags = CFTYPE_NO_PREFIX,
- .mode = S_IWUGO,
+ .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
},
{
.name = "swappiness",
@@ -4785,7 +4784,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *from;
- struct task_struct *p;
+ struct task_struct *leader, *p;
struct mm_struct *mm;
unsigned long move_flags;
int ret = 0;
@@ -4799,7 +4798,20 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
if (!move_flags)
return 0;
- p = cgroup_taskset_first(tset);
+ /*
+ * Multi-process migrations only happen on the default hierarchy
+ * where charge immigration is not used. Perform charge
+ * immigration if @tset contains a leader and whine if there are
+ * multiple.
+ */
+ p = NULL;
+ cgroup_taskset_for_each_leader(leader, tset) {
+ WARN_ON_ONCE(p);
+ p = leader;
+ }
+ if (!p)
+ return 0;
+
from = mem_cgroup_from_task(p);
VM_BUG_ON(from == memcg);
@@ -5015,7 +5027,7 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
* guarantees that @root doesn't have any children, so turning it
* on for the root memcg is enough.
*/
- if (cgroup_on_dfl(root_css->cgroup))
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
root_mem_cgroup->use_hierarchy = true;
else
root_mem_cgroup->use_hierarchy = false;
@@ -5162,6 +5174,7 @@ static struct cftype memory_files[] = {
{
.name = "events",
.flags = CFTYPE_NOT_ON_ROOT,
+ .file_offset = offsetof(struct mem_cgroup, events_file),
.seq_show = memory_events_show,
},
{ } /* terminate */
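The new .file_offset field ties the "events" cftype to a struct cgroup_file embedded in struct mem_cgroup, so the controller can later wake readers and pollers of that file. A minimal sketch of the intended usage, assuming the cgroup_file_notify() helper added elsewhere in this series (the wrapper name below is illustrative, not part of this hunk):

static inline void memcg_notify_events_file(struct mem_cgroup *memcg)
{
	/* events_file is populated by the cgroup core because the
	 * "events" cftype carries ->file_offset pointing at it. */
	cgroup_file_notify(&memcg->events_file);
}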
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fdd89978d083..55721b619aee 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc)
if (!memcg)
return true;
#ifdef CONFIG_CGROUP_WRITEBACK
- if (cgroup_on_dfl(memcg->css.cgroup))
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
return true;
#endif
return false;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 54a00d66509e..78f098a20796 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -318,7 +318,7 @@ static int get_secret(struct ceph_crypto_key *dst, const char *name) {
goto out;
}
- ckey = ukey->payload.data;
+ ckey = ukey->payload.data[0];
err = ceph_crypto_key_clone(dst, ckey);
if (err)
goto out_key;
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 4440edcce0d6..42e8649c6e79 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -537,7 +537,7 @@ static int ceph_key_preparse(struct key_preparsed_payload *prep)
if (ret < 0)
goto err_ckey;
- prep->payload[0] = ckey;
+ prep->payload.data[0] = ckey;
prep->quotalen = datalen;
return 0;
@@ -549,14 +549,14 @@ err:
static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
- struct ceph_crypto_key *ckey = prep->payload[0];
+ struct ceph_crypto_key *ckey = prep->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
static void ceph_key_destroy(struct key *key)
{
- struct ceph_crypto_key *ckey = key->payload.data;
+ struct ceph_crypto_key *ckey = key->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 31cd4fd75486..c79b85eb4d4c 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -122,7 +122,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
goto bad_option_value;
kdebug("dns error no. = %lu", derrno);
- prep->type_data[0] = ERR_PTR(-derrno);
+ prep->payload.data[dns_key_error] = ERR_PTR(-derrno);
continue;
}
@@ -137,8 +137,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
/* don't cache the result if we're caching an error saying there's no
* result */
- if (prep->type_data[0]) {
- kleave(" = 0 [h_error %ld]", PTR_ERR(prep->type_data[0]));
+ if (prep->payload.data[dns_key_error]) {
+ kleave(" = 0 [h_error %ld]", PTR_ERR(prep->payload.data[dns_key_error]));
return 0;
}
@@ -155,7 +155,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
memcpy(upayload->data, data, result_len);
upayload->data[result_len] = '\0';
- prep->payload[0] = upayload;
+ prep->payload.data[dns_key_data] = upayload;
kleave(" = 0");
return 0;
}
@@ -167,7 +167,7 @@ static void dns_resolver_free_preparse(struct key_preparsed_payload *prep)
{
pr_devel("==>%s()\n", __func__);
- kfree(prep->payload[0]);
+ kfree(prep->payload.data[dns_key_data]);
}
/*
@@ -223,10 +223,10 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
*/
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
- int err = key->type_data.x[0];
-
seq_puts(m, key->description);
if (key_is_instantiated(key)) {
+ int err = PTR_ERR(key->payload.data[dns_key_error]);
+
if (err)
seq_printf(m, ": %d", err);
else
@@ -241,8 +241,10 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
static long dns_resolver_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- if (key->type_data.x[0])
- return key->type_data.x[0];
+ int err = PTR_ERR(key->payload.data[dns_key_error]);
+
+ if (err)
+ return err;
return user_read(key, buffer, buflen);
}
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 39d2c39bdf87..4677b6fa6dda 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -70,7 +70,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
const char *options, char **_result, time_t *_expiry)
{
struct key *rkey;
- struct user_key_payload *upayload;
+ const struct user_key_payload *upayload;
const struct cred *saved_cred;
size_t typelen, desclen;
char *desc, *cp;
@@ -137,12 +137,11 @@ int dns_query(const char *type, const char *name, size_t namelen,
goto put;
/* If the DNS server gave an error, return that to the caller */
- ret = rkey->type_data.x[0];
+ ret = PTR_ERR(rkey->payload.data[dns_key_error]);
if (ret)
goto put;
- upayload = rcu_dereference_protected(rkey->payload.data,
- lockdep_is_held(&rkey->sem));
+ upayload = user_key_payload(rkey);
len = upayload->datalen;
ret = -ENOMEM;
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 7af1ed39c009..0c570d40e4d6 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -23,6 +23,14 @@
#include <linux/sched.h>
/*
+ * Layout of key payload words.
+ */
+enum {
+ dns_key_data,
+ dns_key_error,
+};
+
+/*
* dns_key.c
*/
extern const struct cred *dns_resolver_cache;
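The two payload words declared here back the accesses rewritten above: slot dns_key_data holds the cached result payload, while slot dns_key_error holds a resolver error wrapped with ERR_PTR(), or NULL when there is none. A small sketch of the convention (hypothetical helper, not part of the patch):

/* PTR_ERR(NULL) is 0, so a NULL error slot reads back as "no error". */
static long dns_key_cached_error(const struct key *key)
{
	return PTR_ERR(key->payload.data[dns_key_error]);
}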
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 25d60ed15284..1f8a144a5dc2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -305,7 +305,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
if (!key)
key = rx->key;
- if (key && !key->payload.data)
+ if (key && !key->payload.data[0])
key = NULL; /* a no-security key */
bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index db0f39f5ef96..da3cc09f683e 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -148,10 +148,10 @@ static int rxrpc_preparse_xdr_rxkad(struct key_preparsed_payload *prep,
token->kad->ticket[6], token->kad->ticket[7]);
/* count the number of tokens attached */
- prep->type_data[0] = (void *)((unsigned long)prep->type_data[0] + 1);
+ prep->payload.data[1] = (void *)((unsigned long)prep->payload.data[1] + 1);
/* attach the data */
- for (pptoken = (struct rxrpc_key_token **)&prep->payload[0];
+ for (pptoken = (struct rxrpc_key_token **)&prep->payload.data[0];
*pptoken;
pptoken = &(*pptoken)->next)
continue;
@@ -522,7 +522,7 @@ static int rxrpc_preparse_xdr_rxk5(struct key_preparsed_payload *prep,
goto inval;
/* attach the payload */
- for (pptoken = (struct rxrpc_key_token **)&prep->payload[0];
+ for (pptoken = (struct rxrpc_key_token **)&prep->payload.data[0];
*pptoken;
pptoken = &(*pptoken)->next)
continue;
@@ -764,10 +764,10 @@ static int rxrpc_preparse(struct key_preparsed_payload *prep)
memcpy(&token->kad->ticket, v1->ticket, v1->ticket_length);
/* count the number of tokens attached */
- prep->type_data[0] = (void *)((unsigned long)prep->type_data[0] + 1);
+ prep->payload.data[1] = (void *)((unsigned long)prep->payload.data[1] + 1);
/* attach the data */
- pp = (struct rxrpc_key_token **)&prep->payload[0];
+ pp = (struct rxrpc_key_token **)&prep->payload.data[0];
while (*pp)
pp = &(*pp)->next;
*pp = token;
@@ -814,7 +814,7 @@ static void rxrpc_free_token_list(struct rxrpc_key_token *token)
*/
static void rxrpc_free_preparse(struct key_preparsed_payload *prep)
{
- rxrpc_free_token_list(prep->payload[0]);
+ rxrpc_free_token_list(prep->payload.data[0]);
}
/*
@@ -831,7 +831,7 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
if (prep->datalen != 8)
return -EINVAL;
- memcpy(&prep->type_data, prep->data, 8);
+ memcpy(&prep->payload.data[2], prep->data, 8);
ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ci)) {
@@ -842,7 +842,7 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
if (crypto_blkcipher_setkey(ci, prep->data, 8) < 0)
BUG();
- prep->payload[0] = ci;
+ prep->payload.data[0] = ci;
_leave(" = 0");
return 0;
}
@@ -852,8 +852,8 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
*/
static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
{
- if (prep->payload[0])
- crypto_free_blkcipher(prep->payload[0]);
+ if (prep->payload.data[0])
+ crypto_free_blkcipher(prep->payload.data[0]);
}
/*
@@ -861,7 +861,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
*/
static void rxrpc_destroy(struct key *key)
{
- rxrpc_free_token_list(key->payload.data);
+ rxrpc_free_token_list(key->payload.data[0]);
}
/*
@@ -869,9 +869,9 @@ static void rxrpc_destroy(struct key *key)
*/
static void rxrpc_destroy_s(struct key *key)
{
- if (key->payload.data) {
- crypto_free_blkcipher(key->payload.data);
- key->payload.data = NULL;
+ if (key->payload.data[0]) {
+ crypto_free_blkcipher(key->payload.data[0]);
+ key->payload.data[0] = NULL;
}
}
@@ -1070,7 +1070,7 @@ static long rxrpc_read(const struct key *key,
size += 1 * 4; /* token count */
ntoks = 0;
- for (token = key->payload.data; token; token = token->next) {
+ for (token = key->payload.data[0]; token; token = token->next) {
toksize = 4; /* sec index */
switch (token->security_index) {
@@ -1163,7 +1163,7 @@ static long rxrpc_read(const struct key *key,
ENCODE(ntoks);
tok = 0;
- for (token = key->payload.data; token; token = token->next) {
+ for (token = key->payload.data[0]; token; token = token->next) {
toksize = toksizes[tok++];
ENCODE(toksize);
oldxdr = xdr;
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index c0042807bfc6..a40d3afe93b7 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -158,7 +158,7 @@ int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
service_id = htons(srx->srx_service);
}
key = rx->key;
- if (key && !rx->key->payload.data)
+ if (key && !rx->key->payload.data[0])
key = NULL;
bundle = rxrpc_get_bundle(rx, trans, key, service_id,
GFP_KERNEL);
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
index 49b3cc31ee1f..8334474eb26c 100644
--- a/net/rxrpc/ar-security.c
+++ b/net/rxrpc/ar-security.c
@@ -137,9 +137,9 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
if (ret < 0)
return ret;
- if (!key->payload.data)
+ token = key->payload.data[0];
+ if (!token)
return -EKEYREJECTED;
- token = key->payload.data;
sec = rxrpc_security_lookup(token->security_index);
if (!sec)
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f226709ebd8f..d7a9ab5a9d9c 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -67,7 +67,7 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
_enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));
- token = conn->key->payload.data;
+ token = conn->key->payload.data[0];
conn->security_ix = token->security_index;
ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
@@ -125,7 +125,7 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
if (!conn->key)
return;
- token = conn->key->payload.data;
+ token = conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
desc.tfm = conn->cipher;
@@ -221,7 +221,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
rxkhdr.checksum = 0;
/* encrypt from the session key */
- token = call->conn->key->payload.data;
+ token = call->conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
desc.tfm = call->conn->cipher;
desc.info = iv.x;
@@ -433,7 +433,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
skb_to_sgvec(skb, sg, 0, skb->len);
/* decrypt from the session key */
- token = call->conn->key->payload.data;
+ token = call->conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
desc.tfm = call->conn->cipher;
desc.info = iv.x;
@@ -780,7 +780,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
if (conn->security_level < min_level)
goto protocol_error;
- token = conn->key->payload.data;
+ token = conn->key->payload.data[0];
/* build the response packet */
memset(&resp, 0, sizeof(resp));
@@ -848,12 +848,12 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
}
}
- ASSERT(conn->server_key->payload.data != NULL);
+ ASSERT(conn->server_key->payload.data[0] != NULL);
ASSERTCMP((unsigned long) ticket & 7UL, ==, 0);
- memcpy(&iv, &conn->server_key->type_data, sizeof(iv));
+ memcpy(&iv, &conn->server_key->payload.data[2], sizeof(iv));
- desc.tfm = conn->server_key->payload.data;
+ desc.tfm = conn->server_key->payload.data[0];
desc.info = iv.x;
desc.flags = 0;
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 12efbbefd4d7..1f78169d4254 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -8,6 +8,7 @@ unifdef
ihex2fw
recordmcount
docproc
+check-lc_ctype
sortextable
asn1_compiler
extract-cert
diff --git a/scripts/Makefile b/scripts/Makefile
index 1b2661712d44..fd0d53d4a234 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -7,6 +7,7 @@
# conmakehash: Create chartable
# conmakehash: Create arrays for initializing the kernel console tables
# docproc: Used in Documentation/DocBook
+# check-lc_ctype: Used in Documentation/DocBook
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
@@ -27,14 +28,16 @@ HOSTLOADLIBES_extract-cert = -lcrypto
always := $(hostprogs-y) $(hostprogs-m)
# The following hostprogs-y programs are only build on demand
-hostprogs-y += unifdef docproc
+hostprogs-y += unifdef docproc check-lc_ctype
# These targets are used internally to avoid "is up to date" messages
-PHONY += build_unifdef build_docproc
+PHONY += build_unifdef build_docproc build_check-lc_ctype
build_unifdef: $(obj)/unifdef
@:
build_docproc: $(obj)/docproc
@:
+build_check-lc_ctype: $(obj)/check-lc_ctype
+ @:
subdir-$(CONFIG_MODVERSIONS) += genksyms
subdir-y += mod
diff --git a/scripts/check-lc_ctype.c b/scripts/check-lc_ctype.c
new file mode 100644
index 000000000000..9097ff5449fb
--- /dev/null
+++ b/scripts/check-lc_ctype.c
@@ -0,0 +1,11 @@
+/*
+ * Check that a specified locale works as LC_CTYPE. Used by the
+ * DocBook build system to probe for C.UTF-8 support.
+ */
+
+#include <locale.h>
+
+int main(void)
+{
+ return !setlocale(LC_CTYPE, "");
+}
diff --git a/scripts/extract-module-sig.pl b/scripts/extract-module-sig.pl
new file mode 100755
index 000000000000..faac6f2e377f
--- /dev/null
+++ b/scripts/extract-module-sig.pl
@@ -0,0 +1,136 @@
+#!/usr/bin/perl -w
+#
+# extract-module-sig.pl <part> <module-file>
+#
+# Reads the module file and writes out some or all of the signature
+# section to stdout. Part is the bit to be written and is one of:
+#
+# -0: The unsigned module, no signature data at all
+# -a: All of the signature data, including magic number
+# -d: Just the descriptor values as a sequence of numbers
+# -n: Just the signer's name
+# -k: Just the key ID
+# -s: Just the crypto signature or PKCS#7 message
+#
+use strict;
+
+die "Format: $0 -[0adnks] module-file >out\n"
+ if ($#ARGV != 1);
+
+my $part = $ARGV[0];
+my $modfile = $ARGV[1];
+
+my $magic_number = "~Module signature appended~\n";
+
+#
+# Read the module contents
+#
+open FD, "<$modfile" || die $modfile;
+binmode(FD);
+my @st = stat(FD);
+die "$modfile" unless (@st);
+my $buf = "";
+my $len = sysread(FD, $buf, $st[7]);
+die "$modfile" unless (defined($len));
+die "Short read on $modfile\n" unless ($len == $st[7]);
+close(FD) || die $modfile;
+
+print STDERR "Read ", $len, " bytes from module file\n";
+
+die "The file is too short to have a sig magic number and descriptor\n"
+ if ($len < 12 + length($magic_number));
+
+#
+# Check for the magic number and extract the information block
+#
+my $p = $len - length($magic_number);
+my $raw_magic = substr($buf, $p);
+
+die "Magic number not found at $len\n"
+ if ($raw_magic ne $magic_number);
+print STDERR "Found magic number at $len\n";
+
+$p -= 12;
+my $raw_info = substr($buf, $p, 12);
+
+my @info = unpack("CCCCCxxxN", $raw_info);
+my ($algo, $hash, $id_type, $name_len, $kid_len, $sig_len) = @info;
+
+if ($id_type == 0) {
+ print STDERR "Found PGP key identifier\n";
+} elsif ($id_type == 1) {
+ print STDERR "Found X.509 cert identifier\n";
+} elsif ($id_type == 2) {
+ print STDERR "Found PKCS#7/CMS encapsulation\n";
+} else {
+ print STDERR "Found unsupported identifier type $id_type\n";
+}
+
+#
+# Extract the three pieces of info data
+#
+die "Insufficient name+kid+sig data in file\n"
+ unless ($p >= $name_len + $kid_len + $sig_len);
+
+$p -= $sig_len;
+my $raw_sig = substr($buf, $p, $sig_len);
+$p -= $kid_len;
+my $raw_kid = substr($buf, $p, $kid_len);
+$p -= $name_len;
+my $raw_name = substr($buf, $p, $name_len);
+
+my $module_len = $p;
+
+if ($sig_len > 0) {
+ print STDERR "Found $sig_len bytes of signature [";
+ my $n = $sig_len > 16 ? 16 : $sig_len;
+ foreach my $i (unpack("C" x $n, substr($raw_sig, 0, $n))) {
+ printf STDERR "%02x", $i;
+ }
+ print STDERR "]\n";
+}
+
+if ($kid_len > 0) {
+ print STDERR "Found $kid_len bytes of key identifier [";
+ my $n = $kid_len > 16 ? 16 : $kid_len;
+ foreach my $i (unpack("C" x $n, substr($raw_kid, 0, $n))) {
+ printf STDERR "%02x", $i;
+ }
+ print STDERR "]\n";
+}
+
+if ($name_len > 0) {
+ print STDERR "Found $name_len bytes of signer's name [$raw_name]\n";
+}
+
+#
+# Produce the requested output
+#
+if ($part eq "-0") {
+ # The unsigned module, no signature data at all
+ binmode(STDOUT);
+ print substr($buf, 0, $module_len);
+} elsif ($part eq "-a") {
+ # All of the signature data, including magic number
+ binmode(STDOUT);
+ print substr($buf, $module_len);
+} elsif ($part eq "-d") {
+ # Just the descriptor values as a sequence of numbers
+ print join(" ", @info), "\n";
+} elsif ($part eq "-n") {
+ # Just the signer's name
+ print STDERR "No signer's name for PKCS#7 message type sig\n"
+ if ($id_type == 2);
+ binmode(STDOUT);
+ print $raw_name;
+} elsif ($part eq "-k") {
+ # Just the key identifier
+ print STDERR "No key ID for PKCS#7 message type sig\n"
+ if ($id_type == 2);
+ binmode(STDOUT);
+ print $raw_kid;
+} elsif ($part eq "-s") {
+ # Just the crypto signature or PKCS#7 message
+ binmode(STDOUT);
+ print $raw_sig;
+}
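The 12-byte descriptor unpacked above with "CCCCCxxxN" maps onto the information block the kernel appends just before the magic string. As a sketch (field names follow the kernel's struct module_signature as I understand it; treat the layout below as illustrative rather than authoritative):

struct module_signature {
	u8	algo;		/* public-key crypto algorithm      (first C)       */
	u8	hash;		/* digest algorithm                 (second C)      */
	u8	id_type;	/* key id type: 0 PGP, 1 X.509, 2 PKCS#7            */
	u8	signer_len;	/* length of signer's name          ($name_len)     */
	u8	key_id_len;	/* length of key identifier         ($kid_len)      */
	u8	__pad[3];	/* padding                          (xxx)           */
	__be32	sig_len;	/* length of signature data         (N, big-endian) */
};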
diff --git a/scripts/extract-sys-certs.pl b/scripts/extract-sys-certs.pl
new file mode 100755
index 000000000000..d476e7d1fd88
--- /dev/null
+++ b/scripts/extract-sys-certs.pl
@@ -0,0 +1,144 @@
+#!/usr/bin/perl -w
+#
+use strict;
+use Math::BigInt;
+use Fcntl "SEEK_SET";
+
+die "Format: $0 [-s <systemmap-file>] <vmlinux-file> <keyring-file>\n"
+ if ($#ARGV != 1 && $#ARGV != 3 ||
+ $#ARGV == 3 && $ARGV[0] ne "-s");
+
+my $sysmap = "";
+if ($#ARGV == 3) {
+ shift;
+ $sysmap = $ARGV[0];
+ shift;
+}
+
+my $vmlinux = $ARGV[0];
+my $keyring = $ARGV[1];
+
+#
+# Parse the vmlinux section table
+#
+open FD, "objdump -h $vmlinux |" || die $vmlinux;
+my @lines = <FD>;
+close(FD) || die $vmlinux;
+
+my @sections = ();
+
+foreach my $line (@lines) {
+ chomp($line);
+ if ($line =~ /\s*([0-9]+)\s+(\S+)\s+([0-9a-f]+)\s+([0-9a-f]+)\s+([0-9a-f]+)\s+([0-9a-f]+)\s+2[*][*]([0-9]+)/
+ ) {
+ my $seg = $1;
+ my $name = $2;
+ my $len = Math::BigInt->new("0x" . $3);
+ my $vma = Math::BigInt->new("0x" . $4);
+ my $lma = Math::BigInt->new("0x" . $5);
+ my $foff = Math::BigInt->new("0x" . $6);
+ my $align = 2 ** $7;
+
+ push @sections, { name => $name,
+ vma => $vma,
+ len => $len,
+ foff => $foff };
+ }
+}
+
+print "Have $#sections sections\n";
+
+#
+# Try and parse the vmlinux symbol table. If the vmlinux file has been created
+# from a vmlinuz file with extract-vmlinux then the symbol table will be empty.
+#
+open FD, "nm $vmlinux 2>/dev/null |" || die $vmlinux;
+@lines = <FD>;
+close(FD) || die $vmlinux;
+
+my %symbols = ();
+my $nr_symbols = 0;
+
+sub parse_symbols(@) {
+ foreach my $line (@_) {
+ chomp($line);
+ if ($line =~ /([0-9a-f]+)\s([a-zA-Z])\s(\S+)/
+ ) {
+ my $addr = "0x" . $1;
+ my $type = $2;
+ my $name = $3;
+
+ $symbols{$name} = $addr;
+ $nr_symbols++;
+ }
+ }
+}
+parse_symbols(@lines);
+
+if ($nr_symbols == 0 && $sysmap ne "") {
+ print "No symbols in vmlinux, trying $sysmap\n";
+
+ open FD, "<$sysmap" || die $sysmap;
+ @lines = <FD>;
+ close(FD) || die $sysmap;
+ parse_symbols(@lines);
+}
+
+die "No symbols available\n"
+ if ($nr_symbols == 0);
+
+print "Have $nr_symbols symbols\n";
+
+die "Can't find system certificate list"
+ unless (exists($symbols{"__cert_list_start"}) &&
+ exists($symbols{"__cert_list_end"}));
+
+my $start = Math::BigInt->new($symbols{"__cert_list_start"});
+my $end = Math::BigInt->new($symbols{"__cert_list_end"});
+my $size = $end - $start;
+
+printf "Have %u bytes of certs at VMA 0x%x\n", $size, $start;
+
+my $s = undef;
+foreach my $sec (@sections) {
+ my $s_name = $sec->{name};
+ my $s_vma = $sec->{vma};
+ my $s_len = $sec->{len};
+ my $s_foff = $sec->{foff};
+ my $s_vend = $s_vma + $s_len;
+
+ next unless ($start >= $s_vma);
+ next if ($start >= $s_vend);
+
+ die "Cert object partially overflows section $s_name\n"
+ if ($end > $s_vend);
+
+ die "Cert object in multiple sections: ", $s_name, " and ", $s->{name}, "\n"
+ if ($s);
+ $s = $sec;
+}
+
+die "Cert object not inside a section\n"
+ unless ($s);
+
+print "Certificate list in section ", $s->{name}, "\n";
+
+my $foff = $start - $s->{vma} + $s->{foff};
+
+printf "Certificate list at file offset 0x%x\n", $foff;
+
+open FD, "<$vmlinux" || die $vmlinux;
+binmode(FD);
+die $vmlinux if (!defined(sysseek(FD, $foff, SEEK_SET)));
+my $buf = "";
+my $len = sysread(FD, $buf, $size);
+die "$vmlinux" if (!defined($len));
+die "Short read on $vmlinux\n" if ($len != $size);
+close(FD) || die $vmlinux;
+
+open FD, ">$keyring" || die $keyring;
+binmode(FD);
+$len = syswrite(FD, $buf, $size);
+die "$keyring" if (!defined($len));
+die "Short write on $keyring\n" if ($len != $size);
+close(FD) || die $keyring;
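The only non-obvious step above is translating the certificate list's virtual address into a file offset via its containing section; the arithmetic mirrors "$foff = $start - $s->{vma} + $s->{foff}". A sketch of the same calculation in C (types simplified, purely illustrative):

/* file offset of a symbol = its VMA, rebased from the section's load
 * address onto the section's position within the object file */
static unsigned long long sym_file_offset(unsigned long long sym_vma,
					  unsigned long long sec_vma,
					  unsigned long long sec_foff)
{
	return sym_vma - sec_vma + sec_foff;
}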
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9a08fb5c1af6..125b906cd1d4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -206,59 +206,73 @@ my $type_env = '(\$\w+)';
# One for each output format
# these work fairly well
-my %highlights_html = ( $type_constant, "<i>\$1</i>",
- $type_func, "<b>\$1</b>",
- $type_struct_xml, "<i>\$1</i>",
- $type_env, "<b><i>\$1</i></b>",
- $type_param, "<tt><b>\$1</b></tt>" );
+my @highlights_html = (
+ [$type_constant, "<i>\$1</i>"],
+ [$type_func, "<b>\$1</b>"],
+ [$type_struct_xml, "<i>\$1</i>"],
+ [$type_env, "<b><i>\$1</i></b>"],
+ [$type_param, "<tt><b>\$1</b></tt>"]
+ );
my $local_lt = "\\\\\\\\lt:";
my $local_gt = "\\\\\\\\gt:";
my $blankline_html = $local_lt . "p" . $local_gt; # was "<p>"
# html version 5
-my %highlights_html5 = ( $type_constant, "<span class=\"const\">\$1</span>",
- $type_func, "<span class=\"func\">\$1</span>",
- $type_struct_xml, "<span class=\"struct\">\$1</span>",
- $type_env, "<span class=\"env\">\$1</span>",
- $type_param, "<span class=\"param\">\$1</span>" );
+my @highlights_html5 = (
+ [$type_constant, "<span class=\"const\">\$1</span>"],
+ [$type_func, "<span class=\"func\">\$1</span>"],
+ [$type_struct_xml, "<span class=\"struct\">\$1</span>"],
+ [$type_env, "<span class=\"env\">\$1</span>"],
+ [$type_param, "<span class=\"param\">\$1</span>"]
+ );
my $blankline_html5 = $local_lt . "br /" . $local_gt;
# XML, docbook format
-my %highlights_xml = ( "([^=])\\\"([^\\\"<]+)\\\"", "\$1<quote>\$2</quote>",
- $type_constant, "<constant>\$1</constant>",
- $type_func, "<function>\$1</function>",
- $type_struct_xml, "<structname>\$1</structname>",
- $type_env, "<envar>\$1</envar>",
- $type_param, "<parameter>\$1</parameter>" );
+my @highlights_xml = (
+ ["([^=])\\\"([^\\\"<]+)\\\"", "\$1<quote>\$2</quote>"],
+ [$type_constant, "<constant>\$1</constant>"],
+ [$type_struct_xml, "<structname>\$1</structname>"],
+ [$type_param, "<parameter>\$1</parameter>"],
+ [$type_func, "<function>\$1</function>"],
+ [$type_env, "<envar>\$1</envar>"]
+ );
my $blankline_xml = $local_lt . "/para" . $local_gt . $local_lt . "para" . $local_gt . "\n";
# gnome, docbook format
-my %highlights_gnome = ( $type_constant, "<replaceable class=\"option\">\$1</replaceable>",
- $type_func, "<function>\$1</function>",
- $type_struct, "<structname>\$1</structname>",
- $type_env, "<envar>\$1</envar>",
- $type_param, "<parameter>\$1</parameter>" );
+my @highlights_gnome = (
+ [$type_constant, "<replaceable class=\"option\">\$1</replaceable>"],
+ [$type_func, "<function>\$1</function>"],
+ [$type_struct, "<structname>\$1</structname>"],
+ [$type_env, "<envar>\$1</envar>"],
+ [$type_param, "<parameter>\$1</parameter>" ]
+ );
my $blankline_gnome = "</para><para>\n";
# these are pretty rough
-my %highlights_man = ( $type_constant, "\$1",
- $type_func, "\\\\fB\$1\\\\fP",
- $type_struct, "\\\\fI\$1\\\\fP",
- $type_param, "\\\\fI\$1\\\\fP" );
+my @highlights_man = (
+ [$type_constant, "\$1"],
+ [$type_func, "\\\\fB\$1\\\\fP"],
+ [$type_struct, "\\\\fI\$1\\\\fP"],
+ [$type_param, "\\\\fI\$1\\\\fP"]
+ );
my $blankline_man = "";
# text-mode
-my %highlights_text = ( $type_constant, "\$1",
- $type_func, "\$1",
- $type_struct, "\$1",
- $type_param, "\$1" );
+my @highlights_text = (
+ [$type_constant, "\$1"],
+ [$type_func, "\$1"],
+ [$type_struct, "\$1"],
+ [$type_param, "\$1"]
+ );
my $blankline_text = "";
# list mode
-my %highlights_list = ( $type_constant, "\$1",
- $type_func, "\$1",
- $type_struct, "\$1",
- $type_param, "\$1" );
+my @highlights_list = (
+ [$type_constant, "\$1"],
+ [$type_func, "\$1"],
+ [$type_struct, "\$1"],
+ [$type_param, "\$1"]
+ );
my $blankline_list = "";
# read arguments
@@ -273,7 +287,7 @@ my $verbose = 0;
my $output_mode = "man";
my $output_preformatted = 0;
my $no_doc_sections = 0;
-my %highlights = %highlights_man;
+my @highlights = @highlights_man;
my $blankline = $blankline_man;
my $modulename = "Kernel API";
my $function_only = 0;
@@ -374,31 +388,31 @@ while ($ARGV[0] =~ m/^-(.*)/) {
my $cmd = shift @ARGV;
if ($cmd eq "-html") {
$output_mode = "html";
- %highlights = %highlights_html;
+ @highlights = @highlights_html;
$blankline = $blankline_html;
} elsif ($cmd eq "-html5") {
$output_mode = "html5";
- %highlights = %highlights_html5;
+ @highlights = @highlights_html5;
$blankline = $blankline_html5;
} elsif ($cmd eq "-man") {
$output_mode = "man";
- %highlights = %highlights_man;
+ @highlights = @highlights_man;
$blankline = $blankline_man;
} elsif ($cmd eq "-text") {
$output_mode = "text";
- %highlights = %highlights_text;
+ @highlights = @highlights_text;
$blankline = $blankline_text;
} elsif ($cmd eq "-docbook") {
$output_mode = "xml";
- %highlights = %highlights_xml;
+ @highlights = @highlights_xml;
$blankline = $blankline_xml;
} elsif ($cmd eq "-list") {
$output_mode = "list";
- %highlights = %highlights_list;
+ @highlights = @highlights_list;
$blankline = $blankline_list;
} elsif ($cmd eq "-gnome") {
$output_mode = "gnome";
- %highlights = %highlights_gnome;
+ @highlights = @highlights_gnome;
$blankline = $blankline_gnome;
} elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
$modulename = shift @ARGV;
@@ -1746,7 +1760,7 @@ sub output_declaration {
my $func = "output_${functype}_$output_mode";
if (($function_only==0) ||
( $function_only == 1 && defined($function_table{$name})) ||
- ( $function_only == 2 && !defined($function_table{$name})))
+ ( $function_only == 2 && !($functype eq "function" && defined($function_table{$name}))))
{
&$func(@_);
$section_counter++;
@@ -1791,8 +1805,8 @@ sub dump_struct($$) {
$nested = $1;
# ignore members marked private:
- $members =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gos;
- $members =~ s/\/\*\s*private:.*//gos;
+ $members =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi;
+ $members =~ s/\/\*\s*private:.*//gosi;
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
$nested =~ s/\/\*.*?\*\///gos;
@@ -1869,6 +1883,31 @@ sub dump_typedef($$) {
my $file = shift;
$x =~ s@/\*.*?\*/@@gos; # strip comments.
+
+ # Parse function prototypes
+ if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/) {
+ # Function typedefs
+ $return_type = $1;
+ $declaration_name = $2;
+ my $args = $3;
+
+ create_parameterlist($args, ',', $file);
+
+ output_declaration($declaration_name,
+ 'function',
+ {'function' => $declaration_name,
+ 'module' => $modulename,
+ 'functiontype' => $return_type,
+ 'parameterlist' => \@parameterlist,
+ 'parameterdescs' => \%parameterdescs,
+ 'parametertypes' => \%parametertypes,
+ 'sectionlist' => \@sectionlist,
+ 'sections' => \%sections,
+ 'purpose' => $declaration_purpose
+ });
+ return;
+ }
+
while (($x =~ /\(*.\)\s*;$/) || ($x =~ /\[*.\]\s*;$/)) {
$x =~ s/\(*.\)\s*;$/;/;
$x =~ s/\[*.\]\s*;$/;/;
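For illustration, the new branch picks up kernel-doc comments on function-pointer typedefs of this shape (a made-up example, not taken from the tree):

/**
 * typedef example_handler_t - handle one queued request
 * @dev: device the request arrived on
 * @flags: EXAMPLE_* modifier bits
 *
 * Return: 0 on success or a negative errno.
 */
typedef int (*example_handler_t)(struct device *dev, unsigned long flags);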
@@ -2391,12 +2430,13 @@ sub process_file($) {
my $descr;
my $in_purpose = 0;
my $initial_section_counter = $section_counter;
+ my ($orig_file) = @_;
if (defined($ENV{'SRCTREE'})) {
- $file = "$ENV{'SRCTREE'}" . "/" . "@_";
+ $file = "$ENV{'SRCTREE'}" . "/" . $orig_file;
}
else {
- $file = "@_";
+ $file = $orig_file;
}
if (defined($source_map{$file})) {
$file = $source_map{$file};
@@ -2640,7 +2680,7 @@ sub process_file($) {
print "<refentry>\n";
print " <refnamediv>\n";
print " <refname>\n";
- print " ${file}\n";
+ print " ${orig_file}\n";
print " </refname>\n";
print " <refpurpose>\n";
print " Document generation inconsistency\n";
@@ -2654,7 +2694,7 @@ sub process_file($) {
print " <para>\n";
print " The template for this document tried to insert\n";
print " the structured comment from the file\n";
- print " <filename>${file}</filename> at this point,\n";
+ print " <filename>${orig_file}</filename> at this point,\n";
print " but none was found.\n";
print " This dummy section is inserted to allow\n";
print " generation to continue.\n";
@@ -2671,9 +2711,11 @@ $kernelversion = get_kernel_version();
# generate a sequence of code that will splice in highlighting information
# using the s// operator.
-foreach my $pattern (sort keys %highlights) {
-# print STDERR "scanning pattern:$pattern, highlight:($highlights{$pattern})\n";
- $dohighlight .= "\$contents =~ s:$pattern:$highlights{$pattern}:gs;\n";
+foreach my $k (keys @highlights) {
+ my $pattern = $highlights[$k][0];
+ my $result = $highlights[$k][1];
+# print STDERR "scanning pattern:$pattern, highlight:($result)\n";
+ $dohighlight .= "\$contents =~ s:$pattern:$result:gs;\n";
}
# Read the file that maps relative names to absolute names for
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index d49c53960b60..232469baa94f 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -33,7 +33,7 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE
config SECURITY_APPARMOR_HASH
bool "SHA1 hash of loaded profiles"
depends on SECURITY_APPARMOR
- depends on CRYPTO
+ select CRYPTO
select CRYPTO_SHA1
default y
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 36fb6b527829..5be9ffbe90ba 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -105,7 +105,7 @@ int __init integrity_load_x509(const unsigned int id, const char *path)
rc,
((KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ),
- KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_TRUSTED);
+ KEY_ALLOC_NOT_IN_QUOTA);
if (IS_ERR(key)) {
rc = PTR_ERR(key);
pr_err("Problem loading X.509 certificate (%d): %s\n",
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 159ef3ea4130..461f8d891579 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -247,7 +247,7 @@ int evm_init_key(void)
return -ENOENT;
down_read(&evm_key->sem);
- ekp = evm_key->payload.data;
+ ekp = evm_key->payload.data[0];
if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
rc = -EINVAL;
goto out;
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index b6adb94f6d52..907c1522ee46 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -21,6 +21,16 @@
MODULE_LICENSE("GPL");
/*
+ * Layout of key payload words.
+ */
+enum {
+ big_key_data,
+ big_key_path,
+ big_key_path_2nd_part,
+ big_key_len,
+};
+
+/*
* If the data is under this limit, there's no point creating a shm file to
* hold it as the permanently resident metadata for the shmem fs will be at
* least as large as the data.
@@ -47,7 +57,7 @@ struct key_type key_type_big_key = {
*/
int big_key_preparse(struct key_preparsed_payload *prep)
{
- struct path *path = (struct path *)&prep->payload;
+ struct path *path = (struct path *)&prep->payload.data[big_key_path];
struct file *file;
ssize_t written;
size_t datalen = prep->datalen;
@@ -60,7 +70,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
/* Set an arbitrary quota */
prep->quotalen = 16;
- prep->type_data[1] = (void *)(unsigned long)datalen;
+ prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;
if (datalen > BIG_KEY_FILE_THRESHOLD) {
/* Create a shmem file to store the data in. This will permit the data
@@ -94,7 +104,8 @@ int big_key_preparse(struct key_preparsed_payload *prep)
if (!data)
return -ENOMEM;
- prep->payload[0] = memcpy(data, prep->data, prep->datalen);
+ prep->payload.data[big_key_data] = data;
+ memcpy(data, prep->data, prep->datalen);
}
return 0;
@@ -110,10 +121,10 @@ error:
void big_key_free_preparse(struct key_preparsed_payload *prep)
{
if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
- struct path *path = (struct path *)&prep->payload;
+ struct path *path = (struct path *)&prep->payload.data[big_key_path];
path_put(path);
} else {
- kfree(prep->payload[0]);
+ kfree(prep->payload.data[big_key_data]);
}
}
@@ -123,11 +134,12 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
*/
void big_key_revoke(struct key *key)
{
- struct path *path = (struct path *)&key->payload.data2;
+ struct path *path = (struct path *)&key->payload.data[big_key_path];
/* clear the quota */
key_payload_reserve(key, 0);
- if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
+ if (key_is_instantiated(key) &&
+ (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
vfs_truncate(path, 0);
}
@@ -136,14 +148,16 @@ void big_key_revoke(struct key *key)
*/
void big_key_destroy(struct key *key)
{
- if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) {
- struct path *path = (struct path *)&key->payload.data2;
+ size_t datalen = (size_t)key->payload.data[big_key_len];
+
+ if (datalen) {
+ struct path *path = (struct path *)&key->payload.data[big_key_path];
path_put(path);
path->mnt = NULL;
path->dentry = NULL;
} else {
- kfree(key->payload.data);
- key->payload.data = NULL;
+ kfree(key->payload.data[big_key_data]);
+ key->payload.data[big_key_data] = NULL;
}
}
@@ -152,12 +166,12 @@ void big_key_destroy(struct key *key)
*/
void big_key_describe(const struct key *key, struct seq_file *m)
{
- unsigned long datalen = key->type_data.x[1];
+ size_t datalen = (size_t)key->payload.data[big_key_len];
seq_puts(m, key->description);
if (key_is_instantiated(key))
- seq_printf(m, ": %lu [%s]",
+ seq_printf(m, ": %zu [%s]",
datalen,
datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}
@@ -168,14 +182,14 @@ void big_key_describe(const struct key *key, struct seq_file *m)
*/
long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
{
- unsigned long datalen = key->type_data.x[1];
+ size_t datalen = (size_t)key->payload.data[big_key_len];
long ret;
if (!buffer || buflen < datalen)
return datalen;
if (datalen > BIG_KEY_FILE_THRESHOLD) {
- struct path *path = (struct path *)&key->payload.data2;
+ struct path *path = (struct path *)&key->payload.data[big_key_path];
struct file *file;
loff_t pos;
@@ -190,7 +204,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
ret = -EIO;
} else {
ret = datalen;
- if (copy_to_user(buffer, key->payload.data, datalen) != 0)
+ if (copy_to_user(buffer, key->payload.data[big_key_data],
+ datalen) != 0)
ret = -EFAULT;
}
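A note on the slot layout defined at the top of this file: casting &payload.data[big_key_path] to struct path * works because a path is exactly two pointers wide, which is why big_key_path_2nd_part reserves the following word. Roughly (struct path shown for illustration; it lives in include/linux/path.h):

struct path {			/* two pointer-sized members ...        */
	struct vfsmount *mnt;	/* ... occupying payload.data[1]        */
	struct dentry *dentry;	/* ... and payload.data[2]              */
};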
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 7bed4ad7cd76..927db9f35ad6 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -303,10 +303,10 @@ out:
*
* Use a user provided key to encrypt/decrypt an encrypted-key.
*/
-static struct key *request_user_key(const char *master_desc, u8 **master_key,
+static struct key *request_user_key(const char *master_desc, const u8 **master_key,
size_t *master_keylen)
{
- struct user_key_payload *upayload;
+ const struct user_key_payload *upayload;
struct key *ukey;
ukey = request_key(&key_type_user, master_desc, NULL);
@@ -314,7 +314,7 @@ static struct key *request_user_key(const char *master_desc, u8 **master_key,
goto error;
down_read(&ukey->sem);
- upayload = ukey->payload.data;
+ upayload = user_key_payload(ukey);
*master_key = upayload->data;
*master_keylen = upayload->datalen;
error:
@@ -426,7 +426,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
}
static struct key *request_master_key(struct encrypted_key_payload *epayload,
- u8 **master_key, size_t *master_keylen)
+ const u8 **master_key, size_t *master_keylen)
{
struct key *mkey = NULL;
@@ -653,7 +653,7 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
{
struct key *mkey;
u8 derived_key[HASH_SIZE];
- u8 *master_key;
+ const u8 *master_key;
u8 *hmac;
const char *hex_encoded_data;
unsigned int encrypted_datalen;
@@ -837,7 +837,7 @@ static void encrypted_rcu_free(struct rcu_head *rcu)
*/
static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
{
- struct encrypted_key_payload *epayload = key->payload.data;
+ struct encrypted_key_payload *epayload = key->payload.data[0];
struct encrypted_key_payload *new_epayload;
char *buf;
char *new_master_desc = NULL;
@@ -896,7 +896,7 @@ static long encrypted_read(const struct key *key, char __user *buffer,
{
struct encrypted_key_payload *epayload;
struct key *mkey;
- u8 *master_key;
+ const u8 *master_key;
size_t master_keylen;
char derived_key[HASH_SIZE];
char *ascii_buf;
@@ -957,13 +957,13 @@ out:
*/
static void encrypted_destroy(struct key *key)
{
- struct encrypted_key_payload *epayload = key->payload.data;
+ struct encrypted_key_payload *epayload = key->payload.data[0];
if (!epayload)
return;
memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
- kfree(key->payload.data);
+ kfree(key->payload.data[0]);
}
struct key_type key_type_encrypted = {
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
index 8136a2d44c63..47802c0de735 100644
--- a/security/keys/encrypted-keys/encrypted.h
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -5,10 +5,10 @@
#if defined(CONFIG_TRUSTED_KEYS) || \
(defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
extern struct key *request_trusted_key(const char *trusted_desc,
- u8 **master_key, size_t *master_keylen);
+ const u8 **master_key, size_t *master_keylen);
#else
static inline struct key *request_trusted_key(const char *trusted_desc,
- u8 **master_key,
+ const u8 **master_key,
size_t *master_keylen)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
index 013f7e5d3a2f..b5b4812dbc87 100644
--- a/security/keys/encrypted-keys/masterkey_trusted.c
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -29,7 +29,7 @@
* data, trusted key type data is not visible decrypted from userspace.
*/
struct key *request_trusted_key(const char *trusted_desc,
- u8 **master_key, size_t *master_keylen)
+ const u8 **master_key, size_t *master_keylen)
{
struct trusted_key_payload *tpayload;
struct key *tkey;
@@ -39,7 +39,7 @@ struct key *request_trusted_key(const char *trusted_desc,
goto error;
down_read(&tkey->sem);
- tpayload = tkey->payload.data;
+ tpayload = tkey->payload.data[0];
*master_key = tpayload->key;
*master_keylen = tpayload->key_len;
error:
diff --git a/security/keys/key.c b/security/keys/key.c
index aee2ec5a18fc..ab7997ded725 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -278,7 +278,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->index_key.desc_len = desclen;
key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
- if (!key->description)
+ if (!key->index_key.description)
goto no_memory_3;
atomic_set(&key->usage, 1);
@@ -554,7 +554,7 @@ int key_reject_and_link(struct key *key,
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
- key->type_data.reject_error = -error;
+ key->reject_error = -error;
smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
@@ -1046,14 +1046,14 @@ int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
ret = key_payload_reserve(key, prep->quotalen);
if (ret == 0) {
- key->type_data.p[0] = prep->type_data[0];
- key->type_data.p[1] = prep->type_data[1];
- rcu_assign_keypointer(key, prep->payload[0]);
- key->payload.data2[1] = prep->payload[1];
- prep->type_data[0] = NULL;
- prep->type_data[1] = NULL;
- prep->payload[0] = NULL;
- prep->payload[1] = NULL;
+ rcu_assign_keypointer(key, prep->payload.data[0]);
+ key->payload.data[1] = prep->payload.data[1];
+ key->payload.data[2] = prep->payload.data[2];
+ key->payload.data[3] = prep->payload.data[3];
+ prep->payload.data[0] = NULL;
+ prep->payload.data[1] = NULL;
+ prep->payload.data[2] = NULL;
+ prep->payload.data[3] = NULL;
}
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 0b9ec78a7a7a..fb111eafcb89 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -67,7 +67,6 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
char type[32], *description;
void *payload;
long ret;
- bool vm;
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
@@ -98,14 +97,12 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
/* pull the payload in if one was supplied */
payload = NULL;
- vm = false;
if (_payload) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error2;
- vm = true;
payload = vmalloc(plen);
if (!payload)
goto error2;
@@ -138,10 +135,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
key_ref_put(keyring_ref);
error3:
- if (!vm)
- kfree(payload);
- else
- vfree(payload);
+ kvfree(payload);
error2:
kfree(description);
error:
@@ -1033,7 +1027,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
if (!instkey)
goto error;
- rka = instkey->payload.data;
+ rka = instkey->payload.data[0];
if (rka->target_key->serial != id)
goto error;
@@ -1200,7 +1194,7 @@ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
if (!instkey)
goto error;
- rka = instkey->payload.data;
+ rka = instkey->payload.data[0];
if (rka->target_key->serial != id)
goto error;
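The add_key() hunk above drops the vm bookkeeping flag because kvfree() frees memory obtained from either kmalloc() or vmalloc(). A small sketch of that pattern, with hypothetical helper names:

#include <linux/mm.h>		/* kvfree() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/vmalloc.h>	/* vmalloc() */

/* Hypothetical helpers; quota and copy_from_user handling trimmed. */
static void *sketch_alloc_payload(size_t plen)
{
	void *payload;

	payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
	if (!payload && plen > PAGE_SIZE)	/* large buffers fall back to vmalloc */
		payload = vmalloc(plen);
	return payload;
}

static void sketch_free_payload(void *payload)
{
	kvfree(payload);	/* correct for both kmalloc and vmalloc memory */
}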
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d33437007ad2..f931ccfeefb0 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -118,7 +118,7 @@ static void keyring_publish_name(struct key *keyring)
if (!keyring_name_hash[bucket].next)
INIT_LIST_HEAD(&keyring_name_hash[bucket]);
- list_add_tail(&keyring->type_data.link,
+ list_add_tail(&keyring->name_link,
&keyring_name_hash[bucket]);
write_unlock(&keyring_name_lock);
@@ -387,9 +387,9 @@ static void keyring_destroy(struct key *keyring)
if (keyring->description) {
write_lock(&keyring_name_lock);
- if (keyring->type_data.link.next != NULL &&
- !list_empty(&keyring->type_data.link))
- list_del(&keyring->type_data.link);
+ if (keyring->name_link.next != NULL &&
+ !list_empty(&keyring->name_link))
+ list_del(&keyring->name_link);
write_unlock(&keyring_name_lock);
}
@@ -572,7 +572,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
/* we set a different error code if we pass a negative key */
if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
smp_rmb();
- ctx->result = ERR_PTR(key->type_data.reject_error);
+ ctx->result = ERR_PTR(key->reject_error);
kleave(" = %d [neg]", ctx->skipped_ret);
goto skipped;
}
@@ -990,7 +990,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
* that's readable and that hasn't been revoked */
list_for_each_entry(keyring,
&keyring_name_hash[bucket],
- type_data.link
+ name_link
) {
if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
continue;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 43b4cddbf2b3..a3f85d2a00bb 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -457,7 +457,7 @@ key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
down_read(&cred->request_key_auth->sem);
if (key_validate(ctx->cred->request_key_auth) == 0) {
- rka = ctx->cred->request_key_auth->payload.data;
+ rka = ctx->cred->request_key_auth->payload.data[0];
ctx->cred = rka->cred;
key_ref = search_process_keyrings(ctx);
@@ -647,7 +647,7 @@ try_again:
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
- rka = ctx.cred->request_key_auth->payload.data;
+ rka = ctx.cred->request_key_auth->payload.data[0];
key = rka->dest_keyring;
__key_get(key);
}
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 0d6253124278..c7a117c9a8f3 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -271,7 +271,7 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
if (cred->request_key_auth) {
authkey = cred->request_key_auth;
down_read(&authkey->sem);
- rka = authkey->payload.data;
+ rka = authkey->payload.data[0];
if (!test_bit(KEY_FLAG_REVOKED,
&authkey->flags))
dest_keyring =
@@ -596,7 +596,7 @@ int wait_for_key_construction(struct key *key, bool intr)
return -ERESTARTSYS;
if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
smp_rmb();
- return key->type_data.reject_error;
+ return key->reject_error;
}
return key_validate(key);
}
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 5d672f7580dd..4f0f112fe276 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -59,7 +59,7 @@ static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
static int request_key_auth_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
- key->payload.data = (struct request_key_auth *)prep->data;
+ key->payload.data[0] = (struct request_key_auth *)prep->data;
return 0;
}
@@ -69,7 +69,7 @@ static int request_key_auth_instantiate(struct key *key,
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
{
- struct request_key_auth *rka = key->payload.data;
+ struct request_key_auth *rka = key->payload.data[0];
seq_puts(m, "key:");
seq_puts(m, key->description);
@@ -84,7 +84,7 @@ static void request_key_auth_describe(const struct key *key,
static long request_key_auth_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- struct request_key_auth *rka = key->payload.data;
+ struct request_key_auth *rka = key->payload.data[0];
size_t datalen;
long ret;
@@ -110,7 +110,7 @@ static long request_key_auth_read(const struct key *key,
*/
static void request_key_auth_revoke(struct key *key)
{
- struct request_key_auth *rka = key->payload.data;
+ struct request_key_auth *rka = key->payload.data[0];
kenter("{%d}", key->serial);
@@ -125,7 +125,7 @@ static void request_key_auth_revoke(struct key *key)
*/
static void request_key_auth_destroy(struct key *key)
{
- struct request_key_auth *rka = key->payload.data;
+ struct request_key_auth *rka = key->payload.data[0];
kenter("{%d}", key->serial);
@@ -179,7 +179,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags))
goto auth_key_revoked;
- irka = cred->request_key_auth->payload.data;
+ irka = cred->request_key_auth->payload.data[0];
rka->cred = get_cred(irka->cred);
rka->pid = irka->pid;
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index c0594cb07ada..903dace648a1 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -862,12 +862,19 @@ static int datablob_parse(char *datablob, struct trusted_key_payload *p,
static struct trusted_key_options *trusted_options_alloc(void)
{
struct trusted_key_options *options;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ if (tpm2 < 0)
+ return NULL;
options = kzalloc(sizeof *options, GFP_KERNEL);
if (options) {
/* set any non-zero defaults */
options->keytype = SRK_keytype;
- options->keyhandle = SRKHANDLE;
+
+ if (!tpm2)
+ options->keyhandle = SRKHANDLE;
}
return options;
}
@@ -905,6 +912,11 @@ static int trusted_instantiate(struct key *key,
int ret = 0;
int key_cmd;
size_t key_len;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ if (tpm2 < 0)
+ return tpm2;
if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
@@ -932,12 +944,20 @@ static int trusted_instantiate(struct key *key,
goto out;
}
+ if (!options->keyhandle) {
+ ret = -EINVAL;
+ goto out;
+ }
+
dump_payload(payload);
dump_options(options);
switch (key_cmd) {
case Opt_load:
- ret = key_unseal(payload, options);
+ if (tpm2)
+ ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options);
+ else
+ ret = key_unseal(payload, options);
dump_payload(payload);
dump_options(options);
if (ret < 0)
@@ -950,7 +970,10 @@ static int trusted_instantiate(struct key *key,
pr_info("trusted_key: key_create failed (%d)\n", ret);
goto out;
}
- ret = key_seal(payload, options);
+ if (tpm2)
+ ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options);
+ else
+ ret = key_seal(payload, options);
if (ret < 0)
pr_info("trusted_key: key_seal failed (%d)\n", ret);
break;
@@ -984,7 +1007,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
*/
static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
{
- struct trusted_key_payload *p = key->payload.data;
+ struct trusted_key_payload *p = key->payload.data[0];
struct trusted_key_payload *new_p;
struct trusted_key_options *new_o;
size_t datalen = prep->datalen;
@@ -1018,6 +1041,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
kfree(new_p);
goto out;
}
+
+ if (!new_o->keyhandle) {
+ ret = -EINVAL;
+ kfree(new_p);
+ goto out;
+ }
+
/* copy old key values, and reseal with new pcrs */
new_p->migratable = p->migratable;
new_p->key_len = p->key_len;
@@ -1084,12 +1114,12 @@ static long trusted_read(const struct key *key, char __user *buffer,
*/
static void trusted_destroy(struct key *key)
{
- struct trusted_key_payload *p = key->payload.data;
+ struct trusted_key_payload *p = key->payload.data[0];
if (!p)
return;
memset(p->key, 0, p->key_len);
- kfree(key->payload.data);
+ kfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
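The trusted.c hunks above make sealing chip-aware: tpm_is_tpm2(TPM_ANY_NUM) returns a negative errno when no TPM is usable, zero for a TPM 1.2 chip and nonzero for TPM 2.0, and the key commands are dispatched accordingly. The SRKHANDLE default is only applied on 1.2, so a 2.0 caller must supply keyhandle explicitly, hence the new -EINVAL checks. A condensed sketch of the dispatch, not the full instantiate path:

/* Condensed sketch; option parsing and blob handling omitted. */
static int sketch_seal(struct trusted_key_payload *payload,
		       struct trusted_key_options *options)
{
	int tpm2 = tpm_is_tpm2(TPM_ANY_NUM);

	if (tpm2 < 0)
		return tpm2;				/* no usable TPM */
	if (tpm2)
		return tpm_seal_trusted(TPM_ANY_NUM, payload, options);
	return key_seal(payload, options);		/* TPM 1.2 path */
}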
diff --git a/security/keys/trusted.h b/security/keys/trusted.h
index 3249fbd2b653..ff001a5dcb24 100644
--- a/security/keys/trusted.h
+++ b/security/keys/trusted.h
@@ -2,7 +2,6 @@
#define __TRUSTED_KEY_H
/* implementation specific TPM constants */
-#define MAX_PCRINFO_SIZE 64
#define MAX_BUF_SIZE 512
#define TPM_GETRANDOM_SIZE 14
#define TPM_OSAP_SIZE 36
@@ -36,16 +35,6 @@ enum {
SRK_keytype = 4
};
-struct trusted_key_options {
- uint16_t keytype;
- uint32_t keyhandle;
- unsigned char keyauth[SHA1_DIGEST_SIZE];
- unsigned char blobauth[SHA1_DIGEST_SIZE];
- uint32_t pcrinfo_len;
- unsigned char pcrinfo[MAX_PCRINFO_SIZE];
- int pcrlock;
-};
-
#define TPM_DEBUG 0
#if TPM_DEBUG
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 36b47bbd3d8c..28cb30f80256 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -74,7 +74,7 @@ int user_preparse(struct key_preparsed_payload *prep)
/* attach the data */
prep->quotalen = datalen;
- prep->payload[0] = upayload;
+ prep->payload.data[0] = upayload;
upayload->datalen = datalen;
memcpy(upayload->data, prep->data, datalen);
return 0;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(user_preparse);
*/
void user_free_preparse(struct key_preparsed_payload *prep)
{
- kfree(prep->payload[0]);
+ kfree(prep->payload.data[0]);
}
EXPORT_SYMBOL_GPL(user_free_preparse);
@@ -120,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
if (ret == 0) {
/* attach the new data, displacing the old */
- zap = key->payload.data;
+ zap = key->payload.data[0];
rcu_assign_keypointer(key, upayload);
key->expiry = 0;
}
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(user_update);
*/
void user_revoke(struct key *key)
{
- struct user_key_payload *upayload = key->payload.data;
+ struct user_key_payload *upayload = key->payload.data[0];
/* clear the quota */
key_payload_reserve(key, 0);
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(user_revoke);
*/
void user_destroy(struct key *key)
{
- struct user_key_payload *upayload = key->payload.data;
+ struct user_key_payload *upayload = key->payload.data[0];
kfree(upayload);
}
@@ -183,10 +183,10 @@ EXPORT_SYMBOL_GPL(user_describe);
*/
long user_read(const struct key *key, char __user *buffer, size_t buflen)
{
- struct user_key_payload *upayload;
+ const struct user_key_payload *upayload;
long ret;
- upayload = rcu_dereference_key(key);
+ upayload = user_key_payload(key);
ret = upayload->datalen;
/* we can return the data as is */
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index bca1b74a4a2f..8691e92f27e5 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -78,7 +78,7 @@ config SECURITY_SELINUX_CHECKREQPROT_VALUE
int "NSA SELinux checkreqprot default value"
depends on SECURITY_SELINUX
range 0 1
- default 1
+ default 0
help
This option sets the default value for the 'checkreqprot' flag
that determines whether SELinux checks the protection requested
@@ -92,7 +92,7 @@ config SECURITY_SELINUX_CHECKREQPROT_VALUE
'checkreqprot=' boot parameter. It may also be changed at runtime
via /selinux/checkreqprot if authorized by policy.
- If you are unsure how to answer this question, answer 1.
+ If you are unsure how to answer this question, answer 0.
config SECURITY_SELINUX_POLICYDB_VERSION_MAX
bool "NSA SELinux maximum supported policy format version"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 26f4039d54b8..9e591e5989be 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -126,6 +126,7 @@ int selinux_enabled = 1;
#endif
static struct kmem_cache *sel_inode_cache;
+static struct kmem_cache *file_security_cache;
/**
* selinux_secmark_enabled - Check to see if SECMARK is currently enabled
@@ -287,7 +288,7 @@ static int file_alloc_security(struct file *file)
struct file_security_struct *fsec;
u32 sid = current_sid();
- fsec = kzalloc(sizeof(struct file_security_struct), GFP_KERNEL);
+ fsec = kmem_cache_zalloc(file_security_cache, GFP_KERNEL);
if (!fsec)
return -ENOMEM;
@@ -302,7 +303,7 @@ static void file_free_security(struct file *file)
{
struct file_security_struct *fsec = file->f_security;
file->f_security = NULL;
- kfree(fsec);
+ kmem_cache_free(file_security_cache, fsec);
}
static int superblock_alloc_security(struct super_block *sb)
@@ -674,10 +675,9 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (flags[i] == SBLABEL_MNT)
continue;
- rc = security_context_to_sid(mount_options[i],
- strlen(mount_options[i]), &sid, GFP_KERNEL);
+ rc = security_context_str_to_sid(mount_options[i], &sid, GFP_KERNEL);
if (rc) {
- printk(KERN_WARNING "SELinux: security_context_to_sid"
+ printk(KERN_WARNING "SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
mount_options[i], sb->s_id, name, rc);
goto out;
@@ -2617,15 +2617,12 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
for (i = 0; i < opts.num_mnt_opts; i++) {
u32 sid;
- size_t len;
if (flags[i] == SBLABEL_MNT)
continue;
- len = strlen(mount_options[i]);
- rc = security_context_to_sid(mount_options[i], len, &sid,
- GFP_KERNEL);
+ rc = security_context_str_to_sid(mount_options[i], &sid, GFP_KERNEL);
if (rc) {
- printk(KERN_WARNING "SELinux: security_context_to_sid"
+ printk(KERN_WARNING "SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
mount_options[i], sb->s_id, sb->s_type->name, rc);
goto out_free_opts;
@@ -2946,7 +2943,8 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
- if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
+ if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
+ && !(ia_valid & ATTR_FILE))
av |= FILE__OPEN;
return dentry_has_perm(cred, dentry, av);
@@ -3166,7 +3164,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
if (!value || !size)
return -EACCES;
- rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
+ rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
if (rc)
return rc;
@@ -3238,7 +3236,7 @@ static void selinux_file_free_security(struct file *file)
* Check whether a task has the ioctl permission and cmd
* operation to an inode.
*/
-int ioctl_has_perm(const struct cred *cred, struct file *file,
+static int ioctl_has_perm(const struct cred *cred, struct file *file,
u32 requested, u16 cmd)
{
struct common_audit_data ad;
@@ -6093,6 +6091,9 @@ static __init int selinux_init(void)
sel_inode_cache = kmem_cache_create("selinux_inode_security",
sizeof(struct inode_security_struct),
0, SLAB_PANIC, NULL);
+ file_security_cache = kmem_cache_create("selinux_file_security",
+ sizeof(struct file_security_struct),
+ 0, SLAB_PANIC, NULL);
avc_init();
security_add_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
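The hooks.c change above moves per-file security blobs from kzalloc()/kfree() onto a dedicated slab cache, mirroring the existing selinux_inode_security cache. A self-contained sketch of the pattern; the struct here is a simplified stand-in for file_security_struct:

#include <linux/slab.h>
#include <linux/types.h>

struct sketch_file_security {		/* stand-in for file_security_struct */
	u32 sid;			/* SID of the open file */
	u32 fown_sid;			/* SID of the file owner */
};

static struct kmem_cache *sketch_file_security_cache;

static int __init sketch_cache_init(void)
{
	sketch_file_security_cache =
		kmem_cache_create("selinux_file_security",
				  sizeof(struct sketch_file_security),
				  0, SLAB_PANIC, NULL);	/* SLAB_PANIC: panic if creation fails */
	return 0;
}

static struct sketch_file_security *sketch_file_alloc(void)
{
	return kmem_cache_zalloc(sketch_file_security_cache, GFP_KERNEL);
}

static void sketch_file_free(struct sketch_file_security *fsec)
{
	kmem_cache_free(sketch_file_security_cache, fsec);
}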
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 6a681d26bf20..223e9fd15d66 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -166,6 +166,8 @@ int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
int security_context_to_sid(const char *scontext, u32 scontext_len,
u32 *out_sid, gfp_t gfp);
+int security_context_str_to_sid(const char *scontext, u32 *out_sid, gfp_t gfp);
+
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 5bed7716f8ab..c02da25d7b63 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -731,13 +731,11 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
@@ -819,13 +817,11 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
objname = namebuf;
}
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
@@ -882,13 +878,11 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
@@ -940,7 +934,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s", con, user) != 2)
goto out;
- length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
+ length = security_context_str_to_sid(con, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -1000,13 +994,11 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
- GFP_KERNEL);
+ length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b7df12ba61d8..ebb5eb3c318c 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1218,13 +1218,10 @@ static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len)
/*
* Copy the user name, role name and type name into the context.
*/
- sprintf(scontextp, "%s:%s:%s",
+ scontextp += sprintf(scontextp, "%s:%s:%s",
sym_name(&policydb, SYM_USERS, context->user - 1),
sym_name(&policydb, SYM_ROLES, context->role - 1),
sym_name(&policydb, SYM_TYPES, context->type - 1));
- scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) +
- 1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) +
- 1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1));
mls_sid_to_context(context, &scontextp);
@@ -1259,12 +1256,12 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
if (!scontext)
goto out;
- scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
+ scontextp = kmemdup(initial_sid_to_string[sid],
+ *scontext_len, GFP_ATOMIC);
if (!scontextp) {
rc = -ENOMEM;
goto out;
}
- strcpy(scontextp, initial_sid_to_string[sid]);
*scontext = scontextp;
goto out;
}
@@ -1476,6 +1473,11 @@ int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
sid, SECSID_NULL, gfp, 0);
}
+int security_context_str_to_sid(const char *scontext, u32 *sid, gfp_t gfp)
+{
+ return security_context_to_sid(scontext, strlen(scontext), sid, gfp);
+}
+
/**
* security_context_to_sid_default - Obtain a SID for a given security context,
* falling back to specified default if needed.
@@ -2604,18 +2606,12 @@ int security_get_bools(int *len, char ***names, int **values)
goto err;
for (i = 0; i < *len; i++) {
- size_t name_len;
-
(*values)[i] = policydb.bool_val_to_struct[i]->state;
- name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1;
rc = -ENOMEM;
- (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
+ (*names)[i] = kstrdup(sym_name(&policydb, SYM_BOOLS, i), GFP_ATOMIC);
if (!(*names)[i])
goto err;
-
- strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len);
- (*names)[i][name_len - 1] = 0;
}
rc = 0;
out:
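security_context_str_to_sid() is just the strlen() convenience wrapper added above, so call sites that previously passed strlen(scon) or strlen(scon) + 1 collapse to a single call. Usage sketch with a hypothetical caller:

/* Hypothetical caller showing the simplification made at the call sites above. */
static int sketch_option_to_sid(const char *option, u32 *sid)
{
	/* old form: security_context_to_sid(option, strlen(option), sid, GFP_KERNEL) */
	return security_context_str_to_sid(option, sid, GFP_KERNEL);
}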
diff --git a/security/smack/smack.h b/security/smack/smack.h
index fff0c612bbb7..6c91156ae225 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -115,6 +115,7 @@ struct task_smack {
struct smack_known *smk_forked; /* label when forked */
struct list_head smk_rules; /* per task access rules */
struct mutex smk_rules_lock; /* lock for the rules */
+ struct list_head smk_relabel; /* transit allowed labels */
};
#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
@@ -169,7 +170,7 @@ struct smk_port_label {
};
#endif /* SMACK_IPV6_PORT_LABELING */
-struct smack_onlycap {
+struct smack_known_list_elem {
struct list_head list;
struct smack_known *smk_label;
};
@@ -301,6 +302,7 @@ struct smack_known *smk_import_entry(const char *, int);
void smk_insert_entry(struct smack_known *skp);
struct smack_known *smk_find_entry(const char *);
int smack_privileged(int cap);
+void smk_destroy_label_list(struct list_head *list);
/*
* Shared data.
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index bc1053fb5d1d..a283f9e796c1 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -637,7 +637,7 @@ DEFINE_MUTEX(smack_onlycap_lock);
int smack_privileged(int cap)
{
struct smack_known *skp = smk_of_current();
- struct smack_onlycap *sop;
+ struct smack_known_list_elem *sklep;
/*
* All kernel tasks are privileged
@@ -654,8 +654,8 @@ int smack_privileged(int cap)
return 1;
}
- list_for_each_entry_rcu(sop, &smack_onlycap_list, list) {
- if (sop->smk_label == skp) {
+ list_for_each_entry_rcu(sklep, &smack_onlycap_list, list) {
+ if (sklep->smk_label == skp) {
rcu_read_unlock();
return 1;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 996c88956438..ff81026f6ddb 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -52,7 +52,7 @@
#define SMK_SENDING 2
#ifdef SMACK_IPV6_PORT_LABELING
-LIST_HEAD(smk_ipv6_port_list);
+static LIST_HEAD(smk_ipv6_port_list);
#endif
static struct kmem_cache *smack_inode_cache;
int smack_enabled;
@@ -326,6 +326,7 @@ static struct task_smack *new_task_smack(struct smack_known *task,
tsp->smk_task = task;
tsp->smk_forked = forked;
INIT_LIST_HEAD(&tsp->smk_rules);
+ INIT_LIST_HEAD(&tsp->smk_relabel);
mutex_init(&tsp->smk_rules_lock);
return tsp;
@@ -361,6 +362,35 @@ static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
}
/**
+ * smk_copy_relabel - copy smk_relabel labels list
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
+ gfp_t gfp)
+{
+ struct smack_known_list_elem *nklep;
+ struct smack_known_list_elem *oklep;
+
+ INIT_LIST_HEAD(nhead);
+
+ list_for_each_entry(oklep, ohead, list) {
+ nklep = kzalloc(sizeof(struct smack_known_list_elem), gfp);
+ if (nklep == NULL) {
+ smk_destroy_label_list(nhead);
+ return -ENOMEM;
+ }
+ nklep->smk_label = oklep->smk_label;
+ list_add(&nklep->list, nhead);
+ }
+
+ return 0;
+}
+
+/**
* smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_*
* @mode - input mode in form of PTRACE_MODE_*
*
@@ -1922,6 +1952,8 @@ static void smack_cred_free(struct cred *cred)
return;
cred->security = NULL;
+ smk_destroy_label_list(&tsp->smk_relabel);
+
list_for_each_safe(l, n, &tsp->smk_rules) {
rp = list_entry(l, struct smack_rule, list);
list_del(&rp->list);
@@ -1953,6 +1985,11 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
if (rc != 0)
return rc;
+ rc = smk_copy_relabel(&new_tsp->smk_relabel, &old_tsp->smk_relabel,
+ gfp);
+ if (rc != 0)
+ return rc;
+
new->security = new_tsp;
return 0;
}
@@ -3354,6 +3391,9 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
isp->smk_inode = smk_of_current();
break;
+ case PIPEFS_MAGIC:
+ isp->smk_inode = smk_of_current();
+ break;
default:
isp->smk_inode = sbsp->smk_root;
break;
@@ -3549,9 +3589,11 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
static int smack_setprocattr(struct task_struct *p, char *name,
void *value, size_t size)
{
- struct task_smack *tsp;
+ struct task_smack *tsp = current_security();
struct cred *new;
struct smack_known *skp;
+ struct smack_known_list_elem *sklep;
+ int rc;
/*
* Changing another process' Smack value is too dangerous
@@ -3560,7 +3602,7 @@ static int smack_setprocattr(struct task_struct *p, char *name,
if (p != current)
return -EPERM;
- if (!smack_privileged(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN) && list_empty(&tsp->smk_relabel))
return -EPERM;
if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
@@ -3579,12 +3621,27 @@ static int smack_setprocattr(struct task_struct *p, char *name,
if (skp == &smack_known_web)
return -EPERM;
+ if (!smack_privileged(CAP_MAC_ADMIN)) {
+ rc = -EPERM;
+ list_for_each_entry(sklep, &tsp->smk_relabel, list)
+ if (sklep->smk_label == skp) {
+ rc = 0;
+ break;
+ }
+ if (rc)
+ return rc;
+ }
+
new = prepare_creds();
if (new == NULL)
return -ENOMEM;
tsp = new->security;
tsp->smk_task = skp;
+ /*
+ * process can change its label only once
+ */
+ smk_destroy_label_list(&tsp->smk_relabel);
commit_creds(new);
return size;
@@ -4708,8 +4765,6 @@ static __init int smack_init(void)
if (!security_module_enable("smack"))
return 0;
- smack_enabled = 1;
-
smack_inode_cache = KMEM_CACHE(inode_smack, 0);
if (!smack_inode_cache)
return -ENOMEM;
@@ -4721,6 +4776,8 @@ static __init int smack_init(void)
return -ENOMEM;
}
+ smack_enabled = 1;
+
pr_info("Smack: Initializing.\n");
#ifdef CONFIG_SECURITY_SMACK_NETFILTER
pr_info("Smack: Netfilter enabled.\n");
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index c20b154a33f2..94bd9e41c9ec 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -61,6 +61,7 @@ enum smk_inos {
#if IS_ENABLED(CONFIG_IPV6)
SMK_NET6ADDR = 23, /* single label IPv6 hosts */
#endif /* CONFIG_IPV6 */
+ SMK_RELABEL_SELF = 24, /* relabel possible without CAP_MAC_ADMIN */
};
/*
@@ -1501,8 +1502,8 @@ static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
*/
if (smack[0] != '-') {
skp = smk_import_entry(smack, 0);
- if (skp == NULL) {
- rc = -EINVAL;
+ if (IS_ERR(skp)) {
+ rc = PTR_ERR(skp);
goto free_out;
}
} else {
@@ -1914,10 +1915,10 @@ static void *onlycap_seq_next(struct seq_file *s, void *v, loff_t *pos)
static int onlycap_seq_show(struct seq_file *s, void *v)
{
struct list_head *list = v;
- struct smack_onlycap *sop =
- list_entry_rcu(list, struct smack_onlycap, list);
+ struct smack_known_list_elem *sklep =
+ list_entry_rcu(list, struct smack_known_list_elem, list);
- seq_puts(s, sop->smk_label->smk_known);
+ seq_puts(s, sklep->smk_label->smk_known);
seq_putc(s, ' ');
return 0;
@@ -1974,6 +1975,54 @@ static void smk_list_swap_rcu(struct list_head *public,
}
/**
+ * smk_parse_label_list - parse list of Smack labels, separated by spaces
+ *
+ * @data: the string to parse
+ * @list: destination list
+ *
+ * Returns zero on success or error code, as appropriate
+ */
+static int smk_parse_label_list(char *data, struct list_head *list)
+{
+ char *tok;
+ struct smack_known *skp;
+ struct smack_known_list_elem *sklep;
+
+ while ((tok = strsep(&data, " ")) != NULL) {
+ if (!*tok)
+ continue;
+
+ skp = smk_import_entry(tok, 0);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+
+ sklep = kzalloc(sizeof(*sklep), GFP_KERNEL);
+ if (sklep == NULL)
+ return -ENOMEM;
+
+ sklep->smk_label = skp;
+ list_add(&sklep->list, list);
+ }
+
+ return 0;
+}
+
+/**
+ * smk_destroy_label_list - destroy a list of smack_known_list_elem
+ * @list: header pointer of the list to destroy
+ */
+void smk_destroy_label_list(struct list_head *list)
+{
+ struct smack_known_list_elem *sklep;
+ struct smack_known_list_elem *sklep2;
+
+ list_for_each_entry_safe(sklep, sklep2, list, list)
+ kfree(sklep);
+
+ INIT_LIST_HEAD(list);
+}
+
+/**
* smk_write_onlycap - write() for smackfs/onlycap
* @file: file pointer, not actually used
* @buf: where to get the data from
@@ -1986,13 +2035,8 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
char *data;
- char *data_parse;
- char *tok;
- struct smack_known *skp;
- struct smack_onlycap *sop;
- struct smack_onlycap *sop2;
LIST_HEAD(list_tmp);
- int rc = count;
+ int rc;
if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
@@ -2006,26 +2050,7 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
return -EFAULT;
}
- data_parse = data;
- while ((tok = strsep(&data_parse, " ")) != NULL) {
- if (!*tok)
- continue;
-
- skp = smk_import_entry(tok, 0);
- if (IS_ERR(skp)) {
- rc = PTR_ERR(skp);
- break;
- }
-
- sop = kzalloc(sizeof(*sop), GFP_KERNEL);
- if (sop == NULL) {
- rc = -ENOMEM;
- break;
- }
-
- sop->smk_label = skp;
- list_add_rcu(&sop->list, &list_tmp);
- }
+ rc = smk_parse_label_list(data, &list_tmp);
kfree(data);
/*
@@ -2038,17 +2063,14 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
* But do so only on invalid label, not on system errors.
* The invalid label must be first to count as clearing attempt.
*/
- if (rc == -EINVAL && list_empty(&list_tmp))
- rc = count;
-
- if (rc >= 0) {
+ if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
mutex_lock(&smack_onlycap_lock);
smk_list_swap_rcu(&smack_onlycap_list, &list_tmp);
mutex_unlock(&smack_onlycap_lock);
+ rc = count;
}
- list_for_each_entry_safe(sop, sop2, &list_tmp, list)
- kfree(sop);
+ smk_destroy_label_list(&list_tmp);
return rc;
}
@@ -2698,6 +2720,113 @@ static const struct file_operations smk_syslog_ops = {
.llseek = default_llseek,
};
+/*
+ * Seq_file read operations for /smack/relabel-self
+ */
+
+static void *relabel_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_start(s, pos, &tsp->smk_relabel);
+}
+
+static void *relabel_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_next(s, v, pos, &tsp->smk_relabel);
+}
+
+static int relabel_self_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known_list_elem *sklep =
+ list_entry(list, struct smack_known_list_elem, list);
+
+ seq_puts(s, sklep->smk_label->smk_known);
+ seq_putc(s, ' ');
+
+ return 0;
+}
+
+static const struct seq_operations relabel_self_seq_ops = {
+ .start = relabel_self_seq_start,
+ .next = relabel_self_seq_next,
+ .show = relabel_self_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_relabel_self - open() for /smack/relabel-self
+ * @inode: inode structure representing file
+ * @file: "relabel-self" file pointer
+ *
+ * Connect our relabel_self_seq_* operations with /smack/relabel-self
+ * file_operations
+ */
+static int smk_open_relabel_self(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &relabel_self_seq_ops);
+}
+
+/**
+ * smk_write_relabel_self - write() for /smack/relabel-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = current_security();
+ char *data;
+ int rc;
+ LIST_HEAD(list_tmp);
+
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ /*
+ * Enough data must be present.
+ */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ kfree(data);
+ return -EFAULT;
+ }
+
+ rc = smk_parse_label_list(data, &list_tmp);
+ kfree(data);
+
+ if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+ smk_destroy_label_list(&tsp->smk_relabel);
+ list_splice(&list_tmp, &tsp->smk_relabel);
+ return count;
+ }
+
+ smk_destroy_label_list(&list_tmp);
+ return rc;
+}
+
+static const struct file_operations smk_relabel_self_ops = {
+ .open = smk_open_relabel_self,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_relabel_self,
+ .release = seq_release,
+};
/**
* smk_read_ptrace - read() for /smack/ptrace
@@ -2824,6 +2953,9 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
[SMK_NET6ADDR] = {
"ipv6host", &smk_net6addr_ops, S_IRUGO|S_IWUSR},
#endif /* CONFIG_IPV6 */
+ [SMK_RELABEL_SELF] = {
+ "relabel-self", &smk_relabel_self_ops,
+ S_IRUGO|S_IWUGO},
/* last one */
{""}
};
@@ -2892,7 +3024,7 @@ static int __init init_smk_fs(void)
int err;
int rc;
- if (!security_module_enable("smack"))
+ if (smack_enabled == 0)
return 0;
err = smk_init_sysfs();
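Taken together, the smackfs hunks expose /smack/relabel-self: a process with CAP_MAC_ADMIN writes the space-separated set of labels it may later move to, and the smack_setprocattr() change then permits a single unprivileged label change to one of those labels. A userspace sketch under those assumptions (the label names are made up, and the process label is written through /proc/self/attr/current as usual for Smack):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Illustrative only: error reporting trimmed. */
static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, s, strlen(s));
	close(fd);
	return n < 0 ? -1 : 0;
}

int sketch_relabel(void)
{
	/* 1. While still holding CAP_MAC_ADMIN, whitelist the allowed targets. */
	if (write_str("/smack/relabel-self", "WebApp Sandbox"))
		return -1;
	/* 2. Later, without CAP_MAC_ADMIN, switch to one of them (allowed once). */
	return write_str("/proc/self/attr/current", "Sandbox");
}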
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index 95f0bec26a1b..e2ce6c4d7ece 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -404,7 +404,6 @@ MODULE_DEVICE_TABLE(spi, ad1836_ids);
static struct spi_driver ad1836_spi_driver = {
.driver = {
.name = "ad1836",
- .owner = THIS_MODULE,
},
.probe = ad1836_spi_probe,
.remove = ad1836_spi_remove,
diff --git a/sound/soc/codecs/ad193x-spi.c b/sound/soc/codecs/ad193x-spi.c
index 390cef9b9dc2..8199a3de0024 100644
--- a/sound/soc/codecs/ad193x-spi.c
+++ b/sound/soc/codecs/ad193x-spi.c
@@ -36,7 +36,6 @@ static int ad193x_spi_remove(struct spi_device *spi)
static struct spi_driver ad193x_spi_driver = {
.driver = {
.name = "ad193x",
- .owner = THIS_MODULE,
},
.probe = ad193x_spi_probe,
.remove = ad193x_spi_remove,
diff --git a/sound/soc/codecs/adau1761-spi.c b/sound/soc/codecs/adau1761-spi.c
index cce2f11f1ffb..8bc1fbd25fcc 100644
--- a/sound/soc/codecs/adau1761-spi.c
+++ b/sound/soc/codecs/adau1761-spi.c
@@ -64,7 +64,6 @@ MODULE_DEVICE_TABLE(spi, adau1761_spi_id);
static struct spi_driver adau1761_spi_driver = {
.driver = {
.name = "adau1761",
- .owner = THIS_MODULE,
},
.probe = adau1761_spi_probe,
.remove = adau1761_spi_remove,
diff --git a/sound/soc/codecs/adau1781-spi.c b/sound/soc/codecs/adau1781-spi.c
index 194686716bbe..33a73ff78de4 100644
--- a/sound/soc/codecs/adau1781-spi.c
+++ b/sound/soc/codecs/adau1781-spi.c
@@ -62,7 +62,6 @@ MODULE_DEVICE_TABLE(spi, adau1781_spi_id);
static struct spi_driver adau1781_spi_driver = {
.driver = {
.name = "adau1781",
- .owner = THIS_MODULE,
},
.probe = adau1781_spi_probe,
.remove = adau1781_spi_remove,
diff --git a/sound/soc/codecs/adau1977-spi.c b/sound/soc/codecs/adau1977-spi.c
index b05cf5da3a94..0b46d88b481c 100644
--- a/sound/soc/codecs/adau1977-spi.c
+++ b/sound/soc/codecs/adau1977-spi.c
@@ -63,7 +63,6 @@ MODULE_DEVICE_TABLE(spi, adau1977_spi_ids);
static struct spi_driver adau1977_spi_driver = {
.driver = {
.name = "adau1977",
- .owner = THIS_MODULE,
},
.probe = adau1977_spi_probe,
.remove = adau1977_spi_remove,
diff --git a/sound/soc/codecs/adav801.c b/sound/soc/codecs/adav801.c
index 790fce33ab10..055f1228c2b4 100644
--- a/sound/soc/codecs/adav801.c
+++ b/sound/soc/codecs/adav801.c
@@ -39,7 +39,6 @@ static int adav80x_spi_remove(struct spi_device *spi)
static struct spi_driver adav80x_spi_driver = {
.driver = {
.name = "adav801",
- .owner = THIS_MODULE,
},
.probe = adav80x_spi_probe,
.remove = adav80x_spi_remove,
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index 1fd7f72b2a62..595d02d7602c 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -344,7 +344,6 @@ MODULE_DEVICE_TABLE(spi, ak4104_id_table);
static struct spi_driver ak4104_spi_driver = {
.driver = {
.name = "ak4104",
- .owner = THIS_MODULE,
.of_match_table = ak4104_of_match,
},
.id_table = ak4104_id_table,
diff --git a/sound/soc/codecs/cs4271-spi.c b/sound/soc/codecs/cs4271-spi.c
index acd49d86e706..1ff5f520196a 100644
--- a/sound/soc/codecs/cs4271-spi.c
+++ b/sound/soc/codecs/cs4271-spi.c
@@ -42,7 +42,6 @@ static int cs4271_spi_remove(struct spi_device *spi)
static struct spi_driver cs4271_spi_driver = {
.driver = {
.name = "cs4271",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(cs4271_dt_ids),
},
.probe = cs4271_spi_probe,
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 7dc52fe67c80..af23a61b7b28 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -1339,7 +1339,6 @@ static int da7210_spi_remove(struct spi_device *spi)
static struct spi_driver da7210_spi_driver = {
.driver = {
.name = "da7210",
- .owner = THIS_MODULE,
},
.probe = da7210_spi_probe,
.remove = da7210_spi_remove
diff --git a/sound/soc/codecs/pcm1792a.c b/sound/soc/codecs/pcm1792a.c
index 57b0c94a710b..08bb4863e96f 100644
--- a/sound/soc/codecs/pcm1792a.c
+++ b/sound/soc/codecs/pcm1792a.c
@@ -257,7 +257,6 @@ MODULE_DEVICE_TABLE(spi, pcm1792a_spi_ids);
static struct spi_driver pcm1792a_codec_driver = {
.driver = {
.name = "pcm1792a",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pcm1792a_of_match),
},
.id_table = pcm1792a_spi_ids,
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index 7b64a9cef704..712ed6598c48 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -64,7 +64,6 @@ static struct spi_driver pcm512x_spi_driver = {
.id_table = pcm512x_spi_id,
.driver = {
.name = "pcm512x",
- .owner = THIS_MODULE,
.of_match_table = pcm512x_of_match,
.pm = &pcm512x_pm_ops,
},
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 3505aafbade4..91879ea95415 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -232,7 +232,6 @@ static int rt5677_spi_probe(struct spi_device *spi)
static struct spi_driver rt5677_spi_driver = {
.driver = {
.name = "rt5677",
- .owner = THIS_MODULE,
},
.probe = rt5677_spi_probe,
};
diff --git a/sound/soc/codecs/ssm2602-spi.c b/sound/soc/codecs/ssm2602-spi.c
index b5df14fbe3ad..842f373045c6 100644
--- a/sound/soc/codecs/ssm2602-spi.c
+++ b/sound/soc/codecs/ssm2602-spi.c
@@ -35,7 +35,6 @@ MODULE_DEVICE_TABLE(of, ssm2602_of_match);
static struct spi_driver ssm2602_spi_driver = {
.driver = {
.name = "ssm2602",
- .owner = THIS_MODULE,
.of_match_table = ssm2602_of_match,
},
.probe = ssm2602_spi_probe,
diff --git a/sound/soc/codecs/tlv320aic23-spi.c b/sound/soc/codecs/tlv320aic23-spi.c
index 3b387e41d75d..f801ae051658 100644
--- a/sound/soc/codecs/tlv320aic23-spi.c
+++ b/sound/soc/codecs/tlv320aic23-spi.c
@@ -43,7 +43,6 @@ static int aic23_spi_remove(struct spi_device *spi)
static struct spi_driver aic23_spi = {
.driver = {
.name = "tlv320aic23",
- .owner = THIS_MODULE,
},
.probe = aic23_spi_probe,
.remove = aic23_spi_remove,
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 620ab9ea1ef0..2c904d7150ad 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -373,7 +373,6 @@ static int aic26_spi_remove(struct spi_device *spi)
static struct spi_driver aic26_spi = {
.driver = {
.name = "tlv320aic26-codec",
- .owner = THIS_MODULE,
},
.probe = aic26_spi_probe,
.remove = aic26_spi_remove,
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 581ec1502228..e3c34bdc2772 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -998,7 +998,6 @@ static int wm0010_spi_remove(struct spi_device *spi)
static struct spi_driver wm0010_spi_driver = {
.driver = {
.name = "wm0010",
- .owner = THIS_MODULE,
},
.probe = wm0010_spi_probe,
.remove = wm0010_spi_remove,
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index b098a83a44d8..99e40e629cca 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -644,7 +644,6 @@ static int wm8510_spi_remove(struct spi_device *spi)
static struct spi_driver wm8510_spi_driver = {
.driver = {
.name = "wm8510",
- .owner = THIS_MODULE,
.of_match_table = wm8510_of_match,
},
.probe = wm8510_spi_probe,
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 44b9e0ae7451..c759ec068e97 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -431,7 +431,6 @@ static int wm8711_spi_remove(struct spi_device *spi)
static struct spi_driver wm8711_spi_driver = {
.driver = {
.name = "wm8711",
- .owner = THIS_MODULE,
.of_match_table = wm8711_of_match,
},
.probe = wm8711_spi_probe,
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index cd7b02413ccf..1564e6926527 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -272,7 +272,6 @@ static int wm8728_spi_remove(struct spi_device *spi)
static struct spi_driver wm8728_spi_driver = {
.driver = {
.name = "wm8728",
- .owner = THIS_MODULE,
.of_match_table = wm8728_of_match,
},
.probe = wm8728_spi_probe,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index ace8645245a0..15bd547e3c84 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -718,7 +718,6 @@ static int wm8731_spi_remove(struct spi_device *spi)
static struct spi_driver wm8731_spi_driver = {
.driver = {
.name = "wm8731",
- .owner = THIS_MODULE,
.of_match_table = wm8731_of_match,
},
.probe = wm8731_spi_probe,
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index e4a03d98aed4..e7807601e675 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -707,7 +707,6 @@ static int wm8737_spi_remove(struct spi_device *spi)
static struct spi_driver wm8737_spi_driver = {
.driver = {
.name = "wm8737",
- .owner = THIS_MODULE,
.of_match_table = wm8737_of_match,
},
.probe = wm8737_spi_probe,
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index de42c0388772..36ef91fe0511 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -657,7 +657,6 @@ static int wm8741_spi_remove(struct spi_device *spi)
static struct spi_driver wm8741_spi_driver = {
.driver = {
.name = "wm8741",
- .owner = THIS_MODULE,
.of_match_table = wm8741_of_match,
},
.probe = wm8741_spi_probe,
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 873933a7966f..bd9dcd2161bc 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -777,7 +777,6 @@ MODULE_DEVICE_TABLE(spi, wm8750_spi_ids);
static struct spi_driver wm8750_spi_driver = {
.driver = {
.name = "wm8750",
- .owner = THIS_MODULE,
.of_match_table = wm8750_of_match,
},
.id_table = wm8750_spi_ids,
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a801c6d75436..61299ca372ff 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1549,7 +1549,6 @@ static int wm8753_spi_remove(struct spi_device *spi)
static struct spi_driver wm8753_spi_driver = {
.driver = {
.name = "wm8753",
- .owner = THIS_MODULE,
.of_match_table = wm8753_of_match,
},
.probe = wm8753_spi_probe,
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index 66c1f151071d..df6178464b00 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -703,7 +703,6 @@ static int wm8770_spi_remove(struct spi_device *spi)
static struct spi_driver wm8770_spi_driver = {
.driver = {
.name = "wm8770",
- .owner = THIS_MODULE,
.of_match_table = wm8770_of_match,
},
.probe = wm8770_spi_probe,
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 183c9a4966c5..5af44f9a8cf2 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -488,7 +488,6 @@ static int wm8776_spi_remove(struct spi_device *spi)
static struct spi_driver wm8776_spi_driver = {
.driver = {
.name = "wm8776",
- .owner = THIS_MODULE,
.of_match_table = wm8776_of_match,
},
.probe = wm8776_spi_probe,
diff --git a/sound/soc/codecs/wm8804-spi.c b/sound/soc/codecs/wm8804-spi.c
index 407a3cf391e5..9998c78a2325 100644
--- a/sound/soc/codecs/wm8804-spi.c
+++ b/sound/soc/codecs/wm8804-spi.c
@@ -42,7 +42,6 @@ MODULE_DEVICE_TABLE(of, wm8804_of_match);
static struct spi_driver wm8804_spi_driver = {
.driver = {
.name = "wm8804",
- .owner = THIS_MODULE,
.pm = &wm8804_pm,
.of_match_table = wm8804_of_match,
},
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 98900aa66dc3..5d8dca88d612 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1266,7 +1266,6 @@ static int wm8900_spi_remove(struct spi_device *spi)
static struct spi_driver wm8900_spi_driver = {
.driver = {
.name = "wm8900",
- .owner = THIS_MODULE,
},
.probe = wm8900_spi_probe,
.remove = wm8900_spi_remove,
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index f3193fb751cc..7350ff654bbf 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -1033,7 +1033,6 @@ static int wm8983_spi_remove(struct spi_device *spi)
static struct spi_driver wm8983_spi_driver = {
.driver = {
.name = "wm8983",
- .owner = THIS_MODULE,
},
.probe = wm8983_spi_probe,
.remove = wm8983_spi_remove
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 9c3c1517a4f3..9918152a03c7 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1096,7 +1096,6 @@ static int wm8985_spi_remove(struct spi_device *spi)
static struct spi_driver wm8985_spi_driver = {
.driver = {
.name = "wm8985",
- .owner = THIS_MODULE,
},
.probe = wm8985_spi_probe,
.remove = wm8985_spi_remove
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index c88ce99ce9e1..895721a256f0 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -871,7 +871,6 @@ static int wm8988_spi_remove(struct spi_device *spi)
static struct spi_driver wm8988_spi_driver = {
.driver = {
.name = "wm8988",
- .owner = THIS_MODULE,
},
.probe = wm8988_spi_probe,
.remove = wm8988_spi_remove,
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index eda52a96c1fa..24500bafb0a8 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2246,7 +2246,6 @@ static int wm8995_spi_remove(struct spi_device *spi)
static struct spi_driver wm8995_spi_driver = {
.driver = {
.name = "wm8995",
- .owner = THIS_MODULE,
},
.probe = wm8995_spi_probe,
.remove = wm8995_spi_remove
diff --git a/virt/Makefile b/virt/Makefile
new file mode 100644
index 000000000000..be783472ac81
--- /dev/null
+++ b/virt/Makefile
@@ -0,0 +1 @@
+obj-y += lib/
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index e2c876d5a03b..7a79b6853583 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -46,4 +46,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
config KVM_COMPAT
def_bool y
- depends on COMPAT && !S390
+ depends on KVM && COMPAT && !S390
+
+config HAVE_KVM_IRQ_BYPASS
+ bool
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index b9d3a32cbc04..21a0ab2d8919 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -28,6 +28,8 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#include "trace.h"
+
static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
@@ -59,18 +61,6 @@ static void timer_disarm(struct arch_timer_cpu *timer)
}
}
-static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
-{
- int ret;
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
- kvm_vgic_set_phys_irq_active(timer->map, true);
- ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->map,
- timer->irq->level);
- WARN_ON(ret);
-}
-
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -111,14 +101,20 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}
+static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+ (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
+}
+
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
cycle_t cval, now;
- if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
- !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
- kvm_vgic_get_phys_irq_active(timer->map))
+ if (!kvm_timer_irq_can_fire(vcpu))
return false;
cval = timer->cntv_cval;
@@ -127,12 +123,94 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
return cval <= now;
}
+static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
+{
+ int ret;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ BUG_ON(!vgic_initialized(vcpu->kvm));
+
+ timer->irq.level = new_level;
+ trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
+ timer->irq.level);
+ ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer->map,
+ timer->irq.level);
+ WARN_ON(ret);
+}
+
+/*
+ * Check if there was a change in the timer state (should we raise or lower
+ * the line level to the GIC).
+ */
+static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ /*
+ * If userspace modified the timer registers via SET_ONE_REG before
+ * the vgic was initialized, we mustn't set the timer->irq.level value
+ * because the guest would never see the interrupt. Instead wait
+ * until we call this function from kvm_timer_flush_hwstate.
+ */
+ if (!vgic_initialized(vcpu->kvm))
+ return;
+
+ if (kvm_timer_should_fire(vcpu) != timer->irq.level)
+ kvm_timer_update_irq(vcpu, !timer->irq.level);
+}
+
+/*
+ * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * thread is removed from its waitqueue and made runnable when there's a timer
+ * interrupt to handle.
+ */
+void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 ns;
+ cycle_t cval, now;
+
+ BUG_ON(timer_is_armed(timer));
+
+ /*
+ * No need to schedule a background timer if the guest timer has
+ * already expired, because kvm_vcpu_block will return before putting
+ * the thread to sleep.
+ */
+ if (kvm_timer_should_fire(vcpu))
+ return;
+
+ /*
+ * If the timer is not capable of raising interrupts (disabled or
+ * masked), then there's no more work for us to do.
+ */
+ if (!kvm_timer_irq_can_fire(vcpu))
+ return;
+
+ /* The timer has not yet expired, schedule a background timer */
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ ns = cyclecounter_cyc2ns(timecounter->cc,
+ cval - now,
+ timecounter->mask,
+ &timecounter->frac);
+ timer_arm(timer, ns);
+}
+
+void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ timer_disarm(timer);
+}
+
/**
* kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
* @vcpu: The vcpu pointer
*
- * Disarm any pending soft timers, since the world-switch code will write the
- * virtual timer state back to the physical CPU.
+ * Check if the virtual timer has expired while we were running in the host,
+ * and inject an interrupt if that was the case.
*/
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
@@ -140,28 +218,20 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
bool phys_active;
int ret;
- /*
- * We're about to run this vcpu again, so there is no need to
- * keep the background timer running, as we're about to
- * populate the CPU timer again.
- */
- timer_disarm(timer);
+ kvm_timer_update_state(vcpu);
/*
- * If the timer expired while we were not scheduled, now is the time
- * to inject it.
+ * If we enter the guest with the virtual input level to the VGIC
+ * asserted, then we have already told the VGIC what we need to, and
+ * we don't need to exit from the guest until the guest deactivates
+ * the already injected interrupt, so therefore we should set the
+ * hardware active state to prevent unnecessary exits from the guest.
+ *
+ * Conversely, if the virtual input level is deasserted, then always
+ * clear the hardware active state to ensure that hardware interrupts
+ * from the timer triggers a guest exit.
*/
- if (kvm_timer_should_fire(vcpu))
- kvm_timer_inject_irq(vcpu);
-
- /*
- * We keep track of whether the edge-triggered interrupt has been
- * signalled to the vgic/guest, and if so, we mask the interrupt and
- * the physical distributor to prevent the timer from raising a
- * physical interrupt whenever we run a guest, preventing forward
- * VCPU progress.
- */
- if (kvm_vgic_get_phys_irq_active(timer->map))
+ if (timer->irq.level)
phys_active = true;
else
phys_active = false;
@@ -176,32 +246,20 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
* kvm_timer_sync_hwstate - sync timer state from cpu
* @vcpu: The vcpu pointer
*
- * Check if the virtual timer was armed and either schedule a corresponding
- * soft timer or inject directly if already expired.
+ * Check if the virtual timer has expired while we were running in the guest,
+ * and inject an interrupt if that was the case.
*/
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- cycle_t cval, now;
- u64 ns;
BUG_ON(timer_is_armed(timer));
- if (kvm_timer_should_fire(vcpu)) {
- /*
- * Timer has already expired while we were not
- * looking. Inject the interrupt and carry on.
- */
- kvm_timer_inject_irq(vcpu);
- return;
- }
-
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
- ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
- &timecounter->frac);
- timer_arm(timer, ns);
+ /*
+ * The guest could have modified the timer registers or the timer
+ * could have expired, update the timer state.
+ */
+ kvm_timer_update_state(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
@@ -216,7 +274,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
* kvm_vcpu_set_target(). To handle this, we determine
* vcpu timer irq number when the vcpu is reset.
*/
- timer->irq = irq;
+ timer->irq.irq = irq->irq;
/*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
@@ -225,6 +283,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
* the ARMv7 architecture.
*/
timer->cntv_ctl = 0;
+ kvm_timer_update_state(vcpu);
/*
* Tell the VGIC that the virtual interrupt is tied to a
@@ -269,6 +328,8 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
default:
return -1;
}
+
+ kvm_timer_update_state(vcpu);
return 0;
}
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
new file mode 100644
index 000000000000..37d8b98867d5
--- /dev/null
+++ b/virt/kvm/arm/trace.h
@@ -0,0 +1,63 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for vgic
+ */
+TRACE_EVENT(vgic_update_irq_pending,
+ TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
+ TP_ARGS(vcpu_id, irq, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( __u32, irq )
+ __field( bool, level )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->irq = irq;
+ __entry->level = level;
+ ),
+
+ TP_printk("VCPU: %ld, IRQ %d, level: %d",
+ __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+/*
+ * Tracepoints for arch_timer
+ */
+TRACE_EVENT(kvm_timer_update_irq,
+ TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
+ TP_ARGS(vcpu_id, irq, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( __u32, irq )
+ __field( int, level )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->irq = irq;
+ __entry->level = level;
+ ),
+
+ TP_printk("VCPU: %ld, IRQ %d, level %d",
+ __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+#endif /* _TRACE_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
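Once CREATE_TRACE_POINTS is defined (see the vgic.c hunk below), these TRACE_EVENT() definitions expand into callable trace_vgic_update_irq_pending() and trace_kvm_timer_update_irq() hooks. A hedged sketch of how the timer-side tracepoint might be fired when the emulated timer output level changes; the surrounding function and flow are assumptions, only the tracepoint name and the timer->irq fields come from this patch:

static void kvm_timer_update_irq_sketch(struct kvm_vcpu *vcpu, bool new_level)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	timer->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
				   timer->irq.level);

	/* ...then propagate the new level to the VGIC... */
}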
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 8d7b04db8471..ff02f08df74d 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -79,11 +79,7 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-}
-static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
else
@@ -158,6 +154,7 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
/* Get the show on the road... */
vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
@@ -166,7 +163,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v2_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
- .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
.clear_eisr = vgic_v2_clear_eisr,
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 7dd5d62f10a1..487d6357b7e7 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -112,11 +112,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
}
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
-}
-static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
else
@@ -193,6 +189,7 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vgic_v3->vgic_vmcr = 0;
+ vgic_v3->vgic_elrsr = ~0;
/*
* If we are emulating a GICv3, we do it in an non-GICv2-compatible
@@ -211,7 +208,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v3_ops = {
.get_lr = vgic_v3_get_lr,
.set_lr = vgic_v3_set_lr,
- .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
.get_elrsr = vgic_v3_get_elrsr,
.get_eisr = vgic_v3_get_eisr,
.clear_eisr = vgic_v3_clear_eisr,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 30489181922d..533538385d5d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -34,6 +34,9 @@
#include <asm/kvm.h>
#include <kvm/iodev.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
/*
* How the whole thing works (courtesy of Christoffer Dall):
*
@@ -102,11 +105,13 @@
#include "vgic.h"
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
int virt_irq);
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
@@ -357,6 +362,11 @@ static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
+ if (!vgic_dist_irq_get_level(vcpu, irq)) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ if (!compute_pending_for_cpu(vcpu))
+ clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+ }
}
static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
@@ -531,34 +541,6 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
return false;
}
-/*
- * If a mapped interrupt's state has been modified by the guest such that it
- * is no longer active or pending, without it have gone through the sync path,
- * then the map->active field must be cleared so the interrupt can be taken
- * again.
- */
-static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct list_head *root;
- struct irq_phys_map_entry *entry;
- struct irq_phys_map *map;
-
- rcu_read_lock();
-
- /* Check for PPIs */
- root = &vgic_cpu->irq_phys_map_list;
- list_for_each_entry_rcu(entry, root, entry) {
- map = &entry->map;
-
- if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
- !vgic_irq_is_active(vcpu, map->virt_irq))
- map->active = false;
- }
-
- rcu_read_unlock();
-}
-
bool vgic_handle_clear_pending_reg(struct kvm *kvm,
struct kvm_exit_mmio *mmio,
phys_addr_t offset, int vcpu_id)
@@ -589,7 +571,6 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
vcpu_id, offset);
vgic_reg_access(mmio, reg, offset, mode);
- vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
vgic_update_state(kvm);
return true;
}
@@ -627,7 +608,6 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
if (mmio->is_write) {
- vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
vgic_update_state(kvm);
return true;
}
@@ -684,10 +664,9 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
vgic_reg_access(mmio, &val, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (offset < 8) {
- *reg = ~0U; /* Force PPIs/SGIs to 1 */
+ /* Ignore writes to read-only SGI and PPI bits */
+ if (offset < 8)
return false;
- }
val = vgic_cfg_compress(val);
if (offset & 4) {
@@ -713,9 +692,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int i;
- for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+ for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
struct vgic_lr lr = vgic_get_lr(vcpu, i);
/*
@@ -736,30 +717,14 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* interrupt then move the active state to the
* distributor tracking bit.
*/
- if (lr.state & LR_STATE_ACTIVE) {
+ if (lr.state & LR_STATE_ACTIVE)
vgic_irq_set_active(vcpu, lr.irq);
- lr.state &= ~LR_STATE_ACTIVE;
- }
/*
* Reestablish the pending state on the distributor and the
- * CPU interface. It may have already been pending, but that
- * is fine, then we are only setting a few bits that were
- * already set.
+ * CPU interface and mark the LR as free for other use.
*/
- if (lr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, lr.irq);
- lr.state &= ~LR_STATE_PENDING;
- }
-
- vgic_set_lr(vcpu, i, lr);
-
- /*
- * Mark the LR as free for other use.
- */
- BUG_ON(lr.state & LR_STATE_MASK);
- vgic_retire_lr(i, lr.irq, vcpu);
- vgic_irq_clear_queued(vcpu, lr.irq);
+ vgic_retire_lr(i, vcpu);
/* Finally update the VGIC state. */
vgic_update_state(vcpu->kvm);
@@ -1067,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
vgic_ops->set_lr(vcpu, lr, vlr);
}
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr vlr)
-{
- vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
return vgic_ops->get_elrsr(vcpu);
@@ -1118,25 +1077,23 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
vgic_ops->enable(vcpu);
}
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+
/*
* We must transfer the pending state back to the distributor before
* retiring the LR, otherwise we may lose edge-triggered interrupts.
*/
if (vlr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, irq);
+ vgic_dist_irq_set_pending(vcpu, vlr.irq);
vlr.hwirq = 0;
}
vlr.state = 0;
vgic_set_lr(vcpu, lr_nr, vlr);
- clear_bit(lr_nr, vgic_cpu->lr_used);
- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1150,17 +1107,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
*/
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int lr;
- for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+ for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
- vgic_retire_lr(lr, vlr.irq, vcpu);
- if (vgic_irq_is_queued(vcpu, vlr.irq))
- vgic_irq_clear_queued(vcpu, vlr.irq);
- }
+ if (!vgic_irq_is_enabled(vcpu, vlr.irq))
+ vgic_retire_lr(lr, vcpu);
}
}
@@ -1200,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
}
vgic_set_lr(vcpu, lr_nr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1210,8 +1164,9 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
*/
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
struct vgic_lr vlr;
int lr;
@@ -1222,28 +1177,22 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
kvm_debug("Queue IRQ%d\n", irq);
- lr = vgic_cpu->vgic_irq_lr_map[irq];
-
/* Do we have an active interrupt for the same CPUID? */
- if (lr != LR_EMPTY) {
+ for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
vlr = vgic_get_lr(vcpu, lr);
- if (vlr.source == sgi_source_id) {
+ if (vlr.irq == irq && vlr.source == sgi_source_id) {
kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
- BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
return true;
}
}
/* Try to use another LR for this interrupt */
- lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
- vgic->nr_lr);
+ lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
if (lr >= vgic->nr_lr)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
- vgic_cpu->vgic_irq_lr_map[irq] = lr;
- set_bit(lr, vgic_cpu->lr_used);
vlr.irq = irq;
vlr.source = sgi_source_id;
@@ -1338,12 +1287,60 @@ epilog:
}
}
+static int process_queued_irq(struct kvm_vcpu *vcpu,
+ int lr, struct vgic_lr vlr)
+{
+ int pending = 0;
+
+ /*
+ * If the IRQ was EOIed (called from vgic_process_maintenance) or it
+ * went from active to non-active (called from vgic_sync_hwirq) it was
+ * also ACKed and we therefore assume we can clear the soft pending
+ * state (should it have been set) for this interrupt.
+ *
+ * Note: if the IRQ soft pending state was set after the IRQ was
+ * acked, it actually shouldn't be cleared, but we have no way of
+ * knowing that unless we start trapping ACKs when the soft-pending
+ * state is set.
+ */
+ vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
+
+ /*
+ * Tell the gic to start sampling this interrupt again.
+ */
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+
+ /* Any additional pending interrupt? */
+ if (vgic_irq_is_edge(vcpu, vlr.irq)) {
+ BUG_ON(!(vlr.state & LR_HW));
+ pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
+ } else {
+ if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+ vgic_cpu_irq_set(vcpu, vlr.irq);
+ pending = 1;
+ } else {
+ vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+ vgic_cpu_irq_clear(vcpu, vlr.irq);
+ }
+ }
+
+ /*
+ * Despite being EOIed, the LR may not have
+ * been marked as empty.
+ */
+ vlr.state = 0;
+ vlr.hwirq = 0;
+ vgic_set_lr(vcpu, lr, vlr);
+
+ return pending;
+}
+
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
u32 status = vgic_get_interrupt_status(vcpu);
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool level_pending = false;
struct kvm *kvm = vcpu->kvm;
+ int level_pending = 0;
kvm_debug("STATUS = %08x\n", status);
@@ -1358,54 +1355,22 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
- spin_lock(&dist->lock);
- vgic_irq_clear_queued(vcpu, vlr.irq);
+ WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
WARN_ON(vlr.state & LR_STATE_MASK);
- vlr.state = 0;
- vgic_set_lr(vcpu, lr, vlr);
- /*
- * If the IRQ was EOIed it was also ACKed and we we
- * therefore assume we can clear the soft pending
- * state (should it had been set) for this interrupt.
- *
- * Note: if the IRQ soft pending state was set after
- * the IRQ was acked, it actually shouldn't be
- * cleared, but we have no way of knowing that unless
- * we start trapping ACKs when the soft-pending state
- * is set.
- */
- vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
/*
* kvm_notify_acked_irq calls kvm_set_irq()
- * to reset the IRQ level. Need to release the
- * lock for kvm_set_irq to grab it.
+ * to reset the IRQ level, which grabs the dist->lock
+ * so we call this before taking the dist->lock.
*/
- spin_unlock(&dist->lock);
-
kvm_notify_acked_irq(kvm, 0,
vlr.irq - VGIC_NR_PRIVATE_IRQS);
- spin_lock(&dist->lock);
-
- /* Any additional pending interrupt? */
- if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
- vgic_cpu_irq_set(vcpu, vlr.irq);
- level_pending = true;
- } else {
- vgic_dist_irq_clear_pending(vcpu, vlr.irq);
- vgic_cpu_irq_clear(vcpu, vlr.irq);
- }
+ spin_lock(&dist->lock);
+ level_pending |= process_queued_irq(vcpu, lr, vlr);
spin_unlock(&dist->lock);
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
}
}
@@ -1426,35 +1391,40 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
/*
* Save the physical active state, and reset it to inactive.
*
- * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
+ * Return true if there's a pending forwarded interrupt to queue.
*/
-static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct irq_phys_map *map;
+ bool phys_active;
+ bool level_pending;
int ret;
if (!(vlr.state & LR_HW))
- return 0;
+ return false;
map = vgic_irq_map_search(vcpu, vlr.irq);
BUG_ON(!map);
ret = irq_get_irqchip_state(map->irq,
IRQCHIP_STATE_ACTIVE,
- &map->active);
+ &phys_active);
WARN_ON(ret);
- if (map->active)
+ if (phys_active)
return 0;
- return 1;
+ spin_lock(&dist->lock);
+ level_pending = process_queued_irq(vcpu, lr, vlr);
+ spin_unlock(&dist->lock);
+ return level_pending;
}
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
u64 elrsr;
unsigned long *elrsr_ptr;
@@ -1462,40 +1432,18 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
bool level_pending;
level_pending = vgic_process_maintenance(vcpu);
- elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = u64_to_bitmask(&elrsr);
/* Deal with HW interrupts, and clear mappings for empty LRs */
for (lr = 0; lr < vgic->nr_lr; lr++) {
- struct vgic_lr vlr;
-
- if (!test_bit(lr, vgic_cpu->lr_used))
- continue;
-
- vlr = vgic_get_lr(vcpu, lr);
- if (vgic_sync_hwirq(vcpu, vlr)) {
- /*
- * So this is a HW interrupt that the guest
- * EOI-ed. Clean the LR state and allow the
- * interrupt to be sampled again.
- */
- vlr.state = 0;
- vlr.hwirq = 0;
- vgic_set_lr(vcpu, lr, vlr);
- vgic_irq_clear_queued(vcpu, vlr.irq);
- set_bit(lr, elrsr_ptr);
- }
-
- if (!test_bit(lr, elrsr_ptr))
- continue;
-
- clear_bit(lr, vgic_cpu->lr_used);
+ struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
+ level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
BUG_ON(vlr.irq >= dist->nr_irqs);
- vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
}
/* Check if we still have something up our sleeve... */
+ elrsr = vgic_get_elrsr(vcpu);
+ elrsr_ptr = u64_to_bitmask(&elrsr);
pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
if (level_pending || pending < vgic->nr_lr)
set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
@@ -1585,6 +1533,8 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
int enabled;
bool ret = true, can_inject = true;
+ trace_vgic_update_irq_pending(cpuid, irq_num, level);
+
if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
return -EINVAL;
@@ -1864,30 +1814,6 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
}
/**
- * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
- *
- * Return the logical active state of a mapped interrupt. This doesn't
- * necessarily reflects the current HW state.
- */
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
-{
- BUG_ON(!map);
- return map->active;
-}
-
-/**
- * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
- *
- * Set the logical active state of a mapped interrupt. This doesn't
- * immediately affects the HW state.
- */
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
-{
- BUG_ON(!map);
- map->active = active;
-}
-
-/**
* kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
* @vcpu: The VCPU pointer
* @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
@@ -1942,12 +1868,10 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
kfree(vgic_cpu->pending_shared);
kfree(vgic_cpu->active_shared);
kfree(vgic_cpu->pend_act_shared);
- kfree(vgic_cpu->vgic_irq_lr_map);
vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
vgic_cpu->pending_shared = NULL;
vgic_cpu->active_shared = NULL;
vgic_cpu->pend_act_shared = NULL;
- vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
@@ -1958,18 +1882,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
if (!vgic_cpu->pending_shared
|| !vgic_cpu->active_shared
- || !vgic_cpu->pend_act_shared
- || !vgic_cpu->vgic_irq_lr_map) {
+ || !vgic_cpu->pend_act_shared) {
kvm_vgic_vcpu_destroy(vcpu);
return -ENOMEM;
}
- memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
-
/*
* Store the number of LRs per vcpu, so we don't have to go
* all the way to the distributor structure to find out. Only
@@ -2111,14 +2031,24 @@ int vgic_init(struct kvm *kvm)
break;
}
- for (i = 0; i < dist->nr_irqs; i++) {
- if (i < VGIC_NR_PPIS)
+ /*
+ * Enable and configure all SGIs to be edge-triggered and
+ * configure all PPIs as level-triggered.
+ */
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ if (i < VGIC_NR_SGIS) {
+ /* SGIs */
vgic_bitmap_set_irq_val(&dist->irq_enabled,
vcpu->vcpu_id, i, 1);
- if (i < VGIC_NR_PRIVATE_IRQS)
vgic_bitmap_set_irq_val(&dist->irq_cfg,
vcpu->vcpu_id, i,
VGIC_CFG_EDGE);
+ } else if (i < VGIC_NR_PRIVATE_IRQS) {
+ /* PPIs */
+ vgic_bitmap_set_irq_val(&dist->irq_cfg,
+ vcpu->vcpu_id, i,
+ VGIC_CFG_LEVEL);
+ }
}
vgic_enable(vcpu);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 44660aee335f..77d42be6970e 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -94,6 +94,10 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);
+ /*
+ * This memory barrier pairs with prepare_to_wait's set_current_state()
+ */
+ smp_mb();
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
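The pairing the comment refers to is the usual waker/waiter pattern: the waiter sets its task state (with an implied barrier) before re-checking the condition, and the waker publishes the condition before testing waitqueue_active(). A generic, hedged sketch of that pattern; the wq/done names are illustrative and not taken from this file:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static bool done;

static void waiter_sketch(void)
{
	DEFINE_WAIT(wait);

	/* set_current_state() inside prepare_to_wait() orders the state
	 * change against the condition check below */
	prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
	if (!done)
		schedule();
	finish_wait(&wq, &wait);
}

static void waker_sketch(void)
{
	done = true;
	smp_mb();	/* pairs with prepare_to_wait()'s set_current_state() */
	if (waitqueue_active(&wq))
		wake_up_interruptible(&wq);
}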
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 79db45336e3a..46dbc0a7dfc1 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -23,6 +23,7 @@
#include <linux/kvm_host.h>
#include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
@@ -34,73 +35,20 @@
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
+#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/iodev.h>
#ifdef CONFIG_HAVE_KVM_IRQFD
-/*
- * --------------------------------------------------------------------
- * irqfd: Allows an fd to be used to inject an interrupt to the guest
- *
- * Credit goes to Avi Kivity for the original idea.
- * --------------------------------------------------------------------
- */
-
-/*
- * Resampling irqfds are a special variety of irqfds used to emulate
- * level triggered interrupts. The interrupt is asserted on eventfd
- * trigger. On acknowledgement through the irq ack notifier, the
- * interrupt is de-asserted and userspace is notified through the
- * resamplefd. All resamplers on the same gsi are de-asserted
- * together, so we don't need to track the state of each individual
- * user. We can also therefore share the same irq source ID.
- */
-struct _irqfd_resampler {
- struct kvm *kvm;
- /*
- * List of resampling struct _irqfd objects sharing this gsi.
- * RCU list modified under kvm->irqfds.resampler_lock
- */
- struct list_head list;
- struct kvm_irq_ack_notifier notifier;
- /*
- * Entry in list of kvm->irqfd.resampler_list. Use for sharing
- * resamplers among irqfds on the same gsi.
- * Accessed and modified under kvm->irqfds.resampler_lock
- */
- struct list_head link;
-};
-
-struct _irqfd {
- /* Used for MSI fast-path */
- struct kvm *kvm;
- wait_queue_t wait;
- /* Update side is protected by irqfds.lock */
- struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
- /* Used for level IRQ fast-path */
- int gsi;
- struct work_struct inject;
- /* The resampler used by this irqfd (resampler-only) */
- struct _irqfd_resampler *resampler;
- /* Eventfd notified on resample (resampler-only) */
- struct eventfd_ctx *resamplefd;
- /* Entry in list of irqfds for a resampler (resampler-only) */
- struct list_head resampler_link;
- /* Used for setup/shutdown */
- struct eventfd_ctx *eventfd;
- struct list_head list;
- poll_table pt;
- struct work_struct shutdown;
-};
static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
{
- struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(work, struct kvm_kernel_irqfd, inject);
struct kvm *kvm = irqfd->kvm;
if (!irqfd->resampler) {
@@ -121,12 +69,13 @@ irqfd_inject(struct work_struct *work)
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
- struct _irqfd_resampler *resampler;
+ struct kvm_kernel_irqfd_resampler *resampler;
struct kvm *kvm;
- struct _irqfd *irqfd;
+ struct kvm_kernel_irqfd *irqfd;
int idx;
- resampler = container_of(kian, struct _irqfd_resampler, notifier);
+ resampler = container_of(kian,
+ struct kvm_kernel_irqfd_resampler, notifier);
kvm = resampler->kvm;
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
@@ -141,9 +90,9 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
}
static void
-irqfd_resampler_shutdown(struct _irqfd *irqfd)
+irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
- struct _irqfd_resampler *resampler = irqfd->resampler;
+ struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
struct kvm *kvm = resampler->kvm;
mutex_lock(&kvm->irqfds.resampler_lock);
@@ -168,7 +117,8 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
static void
irqfd_shutdown(struct work_struct *work)
{
- struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(work, struct kvm_kernel_irqfd, shutdown);
u64 cnt;
/*
@@ -191,6 +141,9 @@ irqfd_shutdown(struct work_struct *work)
/*
* It is now safe to release the object's resources
*/
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ irq_bypass_unregister_consumer(&irqfd->consumer);
+#endif
eventfd_ctx_put(irqfd->eventfd);
kfree(irqfd);
}
@@ -198,7 +151,7 @@ irqfd_shutdown(struct work_struct *work)
/* assumes kvm->irqfds.lock is held */
static bool
-irqfd_is_active(struct _irqfd *irqfd)
+irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
return list_empty(&irqfd->list) ? false : true;
}
@@ -209,7 +162,7 @@ irqfd_is_active(struct _irqfd *irqfd)
* assumes kvm->irqfds.lock is held
*/
static void
-irqfd_deactivate(struct _irqfd *irqfd)
+irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
BUG_ON(!irqfd_is_active(irqfd));
@@ -218,13 +171,23 @@ irqfd_deactivate(struct _irqfd *irqfd)
queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
+int __attribute__((weak)) kvm_arch_set_irq_inatomic(
+ struct kvm_kernel_irq_routing_entry *irq,
+ struct kvm *kvm, int irq_source_id,
+ int level,
+ bool line_status)
+{
+ return -EWOULDBLOCK;
+}
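The weak default always returns -EWOULDBLOCK, which makes irqfd_wakeup() below fall back to the injection workqueue. An architecture can override it to inject directly from atomic context when that is safe; a hedged sketch of such an override, mirroring the MSI fast path the old wakeup code had (whether a given arch does exactly this is an assumption):

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	/* MSI injection never sleeps, so it is safe from the wait-queue
	 * callback; anything else is punted back to the workqueue. */
	if (irq->type == KVM_IRQ_ROUTING_MSI)
		return kvm_set_msi(irq, kvm, irq_source_id, level, line_status);

	return -EWOULDBLOCK;
}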
+
/*
* Called with wqh->lock held and interrupts disabled
*/
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
- struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(wait, struct kvm_kernel_irqfd, wait);
unsigned long flags = (unsigned long)key;
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
@@ -238,10 +201,9 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
irq = irqfd->irq_entry;
} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
/* An event has been signaled, inject an interrupt */
- if (irq.type == KVM_IRQ_ROUTING_MSI)
- kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
- false);
- else
+ if (kvm_arch_set_irq_inatomic(&irq, kvm,
+ KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+ false) == -EWOULDBLOCK)
schedule_work(&irqfd->inject);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
@@ -274,37 +236,54 @@ static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
- struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(pt, struct kvm_kernel_irqfd, pt);
add_wait_queue(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
-static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
+static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
- int i, n_entries;
+ int n_entries;
n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
write_seqcount_begin(&irqfd->irq_entry_sc);
- irqfd->irq_entry.type = 0;
-
e = entries;
- for (i = 0; i < n_entries; ++i, ++e) {
- /* Only fast-path MSI. */
- if (e->type == KVM_IRQ_ROUTING_MSI)
- irqfd->irq_entry = *e;
- }
+ if (n_entries == 1)
+ irqfd->irq_entry = *e;
+ else
+ irqfd->irq_entry.type = 0;
write_seqcount_end(&irqfd->irq_entry_sc);
}
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+void __attribute__((weak)) kvm_arch_irq_bypass_stop(
+ struct irq_bypass_consumer *cons)
+{
+}
+
+void __attribute__((weak)) kvm_arch_irq_bypass_start(
+ struct irq_bypass_consumer *cons)
+{
+}
+
+int __attribute__((weak)) kvm_arch_update_irqfd_routing(
+ struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ return 0;
+}
+#endif
+
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
- struct _irqfd *irqfd, *tmp;
+ struct kvm_kernel_irqfd *irqfd, *tmp;
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
@@ -340,7 +319,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
irqfd->eventfd = eventfd;
if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
- struct _irqfd_resampler *resampler;
+ struct kvm_kernel_irqfd_resampler *resampler;
resamplefd = eventfd_ctx_fdget(args->resamplefd);
if (IS_ERR(resamplefd)) {
@@ -428,6 +407,17 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
* we might race against the POLLHUP
*/
fdput(f);
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ irqfd->consumer.token = (void *)irqfd->eventfd;
+ irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
+ irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
+ irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
+ irqfd->consumer.start = kvm_arch_irq_bypass_start;
+ ret = irq_bypass_register_consumer(&irqfd->consumer);
+ if (ret)
+ pr_info("irq bypass consumer (token %p) registration fails: %d\n",
+ irqfd->consumer.token, ret);
+#endif
return 0;
@@ -469,9 +459,18 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
struct kvm_irq_ack_notifier *kian;
+
+ hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ link)
+ if (kian->gsi == gsi)
+ kian->irq_acked(kian);
+}
+
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
int gsi, idx;
trace_kvm_ack_irq(irqchip, pin);
@@ -479,10 +478,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
- hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
- link)
- if (kian->gsi == gsi)
- kian->irq_acked(kian);
+ kvm_notify_acked_gsi(kvm, gsi);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
@@ -525,7 +521,7 @@ kvm_eventfd_init(struct kvm *kvm)
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
- struct _irqfd *irqfd, *tmp;
+ struct kvm_kernel_irqfd *irqfd, *tmp;
struct eventfd_ctx *eventfd;
eventfd = eventfd_ctx_fdget(args->fd);
@@ -581,7 +577,7 @@ kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
void
kvm_irqfd_release(struct kvm *kvm)
{
- struct _irqfd *irqfd, *tmp;
+ struct kvm_kernel_irqfd *irqfd, *tmp;
spin_lock_irq(&kvm->irqfds.lock);
@@ -604,13 +600,23 @@ kvm_irqfd_release(struct kvm *kvm)
*/
void kvm_irq_routing_update(struct kvm *kvm)
{
- struct _irqfd *irqfd;
+ struct kvm_kernel_irqfd *irqfd;
spin_lock_irq(&kvm->irqfds.lock);
- list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+ list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
irqfd_update(kvm, irqfd);
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ if (irqfd->producer) {
+ int ret = kvm_arch_update_irqfd_routing(
+ irqfd->kvm, irqfd->producer->irq,
+ irqfd->gsi, 1);
+ WARN_ON(ret);
+ }
+#endif
+ }
+
spin_unlock_irq(&kvm->irqfds.lock);
}
@@ -914,9 +920,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
return -EINVAL;
/* ioeventfd with no length can't be combined with DATAMATCH */
- if (!args->len &&
- args->flags & (KVM_IOEVENTFD_FLAG_PIO |
- KVM_IOEVENTFD_FLAG_DATAMATCH))
+ if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
return -EINVAL;
ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index d7ea8e20dae4..f0b08a2a48ba 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -31,16 +31,6 @@
#include <trace/events/kvm.h>
#include "irq.h"
-struct kvm_irq_routing_table {
- int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
- u32 nr_rt_entries;
- /*
- * Array indexed by gsi. Each entry contains list of irq chips
- * the gsi is connected to.
- */
- struct hlist_head map[0];
-};
-
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
@@ -154,11 +144,11 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
/*
* Do not allow GSI to be mapped to the same irqchip more than once.
- * Allow only one to one mapping between GSI and MSI.
+ * Allow only one to one mapping between GSI and non-irqchip routing.
*/
hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
- if (ei->type == KVM_IRQ_ROUTING_MSI ||
- ue->type == KVM_IRQ_ROUTING_MSI ||
+ if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
+ ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->u.irqchip.irqchip == ei->irqchip.irqchip)
return r;
@@ -231,6 +221,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
kvm_irq_routing_update(kvm);
mutex_unlock(&kvm->irq_lock);
+ kvm_arch_irq_routing_update(kvm);
+
synchronize_srcu_expedited(&kvm->irq_srcu);
new = old;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8db1d9361993..484079efea5b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -230,6 +230,9 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
init_waitqueue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
+ vcpu->pre_pcpu = -1;
+ INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
+
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
@@ -2018,6 +2021,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
} while (single_task_running() && ktime_before(cur, stop));
}
+ kvm_arch_vcpu_blocking(vcpu);
+
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
@@ -2031,6 +2036,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
finish_wait(&vcpu->wq, &wait);
cur = ktime_get();
+ kvm_arch_vcpu_unblocking(vcpu);
out:
block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
@@ -2718,6 +2724,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_IRQFD:
case KVM_CAP_IRQFD_RESAMPLE:
#endif
+ case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
return 1;
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
@@ -3341,7 +3348,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
- new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
@@ -3373,7 +3380,7 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
if (r)
return r;
- new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
diff --git a/virt/lib/Kconfig b/virt/lib/Kconfig
new file mode 100644
index 000000000000..89a414f815d2
--- /dev/null
+++ b/virt/lib/Kconfig
@@ -0,0 +1,2 @@
+config IRQ_BYPASS_MANAGER
+ tristate
diff --git a/virt/lib/Makefile b/virt/lib/Makefile
new file mode 100644
index 000000000000..901228d1ffbc
--- /dev/null
+++ b/virt/lib/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_IRQ_BYPASS_MANAGER) += irqbypass.o
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
new file mode 100644
index 000000000000..09a03b5a21ff
--- /dev/null
+++ b/virt/lib/irqbypass.c
@@ -0,0 +1,257 @@
+/*
+ * IRQ offload/bypass manager
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ * Copyright (c) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Various virtualization hardware acceleration techniques allow bypassing or
+ * offloading interrupts received from devices around the host kernel. Posted
+ * Interrupts on Intel VT-d systems can allow interrupts to be received
+ * directly by a virtual machine. ARM IRQ Forwarding allows forwarded physical
+ * interrupts to be directly deactivated by the guest. This manager allows
+ * interrupt producers and consumers to find each other to enable this sort of
+ * bypass.
+ */
+
+#include <linux/irqbypass.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IRQ bypass manager utility module");
+
+static LIST_HEAD(producers);
+static LIST_HEAD(consumers);
+static DEFINE_MUTEX(lock);
+
+/* @lock must be held when calling connect */
+static int __connect(struct irq_bypass_producer *prod,
+ struct irq_bypass_consumer *cons)
+{
+ int ret = 0;
+
+ if (prod->stop)
+ prod->stop(prod);
+ if (cons->stop)
+ cons->stop(cons);
+
+ if (prod->add_consumer)
+ ret = prod->add_consumer(prod, cons);
+
+ if (!ret) {
+ ret = cons->add_producer(cons, prod);
+ if (ret && prod->del_consumer)
+ prod->del_consumer(prod, cons);
+ }
+
+ if (cons->start)
+ cons->start(cons);
+ if (prod->start)
+ prod->start(prod);
+
+ return ret;
+}
+
+/* @lock must be held when calling disconnect */
+static void __disconnect(struct irq_bypass_producer *prod,
+ struct irq_bypass_consumer *cons)
+{
+ if (prod->stop)
+ prod->stop(prod);
+ if (cons->stop)
+ cons->stop(cons);
+
+ cons->del_producer(cons, prod);
+
+ if (prod->del_consumer)
+ prod->del_consumer(prod, cons);
+
+ if (cons->start)
+ cons->start(cons);
+ if (prod->start)
+ prod->start(prod);
+}
+
+/**
+ * irq_bypass_register_producer - register IRQ bypass producer
+ * @producer: pointer to producer structure
+ *
+ * Add the provided IRQ producer to the list of producers and connect
+ * with any matching token found on the IRQ consumers list.
+ */
+int irq_bypass_register_producer(struct irq_bypass_producer *producer)
+{
+ struct irq_bypass_producer *tmp;
+ struct irq_bypass_consumer *consumer;
+
+ might_sleep();
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(tmp, &producers, node) {
+ if (tmp->token == producer->token) {
+ mutex_unlock(&lock);
+ module_put(THIS_MODULE);
+ return -EBUSY;
+ }
+ }
+
+ list_for_each_entry(consumer, &consumers, node) {
+ if (consumer->token == producer->token) {
+ int ret = __connect(producer, consumer);
+ if (ret) {
+ mutex_unlock(&lock);
+ module_put(THIS_MODULE);
+ return ret;
+ }
+ break;
+ }
+ }
+
+ list_add(&producer->node, &producers);
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_bypass_register_producer);
+
+/**
+ * irq_bypass_unregister_producer - unregister IRQ bypass producer
+ * @producer: pointer to producer structure
+ *
+ * Remove a previously registered IRQ producer from the list of producers
+ * and disconnect it from any connected IRQ consumer.
+ */
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer)
+{
+ struct irq_bypass_producer *tmp;
+ struct irq_bypass_consumer *consumer;
+
+ might_sleep();
+
+ if (!try_module_get(THIS_MODULE))
+ return; /* nothing in the list anyway */
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(tmp, &producers, node) {
+ if (tmp->token != producer->token)
+ continue;
+
+ list_for_each_entry(consumer, &consumers, node) {
+ if (consumer->token == producer->token) {
+ __disconnect(producer, consumer);
+ break;
+ }
+ }
+
+ list_del(&producer->node);
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ mutex_unlock(&lock);
+
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
+
+/**
+ * irq_bypass_register_consumer - register IRQ bypass consumer
+ * @consumer: pointer to consumer structure
+ *
+ * Add the provided IRQ consumer to the list of consumers and connect
+ * with any matching token found on the IRQ producer list.
+ */
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
+{
+ struct irq_bypass_consumer *tmp;
+ struct irq_bypass_producer *producer;
+
+ if (!consumer->add_producer || !consumer->del_producer)
+ return -EINVAL;
+
+ might_sleep();
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(tmp, &consumers, node) {
+ if (tmp->token == consumer->token) {
+ mutex_unlock(&lock);
+ module_put(THIS_MODULE);
+ return -EBUSY;
+ }
+ }
+
+ list_for_each_entry(producer, &producers, node) {
+ if (producer->token == consumer->token) {
+ int ret = __connect(producer, consumer);
+ if (ret) {
+ mutex_unlock(&lock);
+ module_put(THIS_MODULE);
+ return ret;
+ }
+ break;
+ }
+ }
+
+ list_add(&consumer->node, &consumers);
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_bypass_register_consumer);
+
+/**
+ * irq_bypass_unregister_consumer - unregister IRQ bypass consumer
+ * @consumer: pointer to consumer structure
+ *
+ * Remove a previously registered IRQ consumer from the list of consumers
+ * and disconnect it from any connected IRQ producer.
+ */
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
+{
+ struct irq_bypass_consumer *tmp;
+ struct irq_bypass_producer *producer;
+
+ might_sleep();
+
+ if (!try_module_get(THIS_MODULE))
+ return; /* nothing in the list anyway */
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(tmp, &consumers, node) {
+ if (tmp->token != consumer->token)
+ continue;
+
+ list_for_each_entry(producer, &producers, node) {
+ if (producer->token == consumer->token) {
+ __disconnect(producer, consumer);
+ break;
+ }
+ }
+
+ list_del(&consumer->node);
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ mutex_unlock(&lock);
+
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer);
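A hedged usage sketch of the manager's API: a producer and a consumer registered with the same opaque token are connected through __connect(). All names below (the callbacks, the pairing function, the token source) are illustrative and not taken from an in-tree user:

#include <linux/irqbypass.h>

static int my_add_producer(struct irq_bypass_consumer *cons,
			   struct irq_bypass_producer *prod)
{
	/* e.g. program a posted-interrupt or IRQ-forwarding path here */
	return 0;
}

static void my_del_producer(struct irq_bypass_consumer *cons,
			    struct irq_bypass_producer *prod)
{
	/* tear the bypass path back down */
}

static struct irq_bypass_producer my_prod;
static struct irq_bypass_consumer my_cons;

static int bypass_pair_sketch(void *token)
{
	int ret;

	my_prod.token = token;
	ret = irq_bypass_register_producer(&my_prod);
	if (ret)
		return ret;

	my_cons.token = token;
	my_cons.add_producer = my_add_producer;
	my_cons.del_producer = my_del_producer;
	return irq_bypass_register_consumer(&my_cons);
}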